| code (string, 81–54k chars) | code_codestyle (int64, 0–721) | style_context (string, 91–41.9k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
"""simple docstring"""
def a__ ( lowerCAmelCase : str ):
'''simple docstring'''
if n_term == "":
return []
UpperCAmelCase__ : list = []
for temp in range(int(lowerCAmelCase ) ):
series.append(F"1/{temp + 1}" if series else "1" )
return series
if __name__ == "__main__":
A__ : List[str] = input("""Enter the last number (nth term) of the Harmonic Series""")
print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""")
print(harmonic_series(nth_term))
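# Example (added for clarity; not part of the original file): the terms come
# back as strings, with a bare "1" for the first term.
#   >>> harmonic_series("5")
#   ['1', '1/2', '1/3', '1/4', '1/5']
#   >>> harmonic_series("")
#   []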
"""simple docstring"""
from math import isqrt
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : List[Any] = False
return [i for i in range(2 , lowerCAmelCase ) if is_prime[i]]
def a__ ( lowerCAmelCase : int = 10**8 ):
'''simple docstring'''
UpperCAmelCase__ : Dict = calculate_prime_numbers(max_number // 2 )
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Tuple = len(lowerCAmelCase ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(f"""{solution() = }""")
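# Worked check (added): for solution(30) the sieve yields the primes below 15,
# [2, 3, 5, 7, 11, 13]. The two-pointer scan counts pairs (p, q) with p <= q
# and p * q < 30: six pairs with p = 2, three with p = 3, one with p = 5, so
# solution(30) == 10, matching the semiprimes below 30:
# 4, 6, 9, 10, 14, 15, 21, 22, 25, 26.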
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
A__ : Dict = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(self, parent, d_model=16, batch_size=13, prediction_length=7, context_length=14, label_length=10, cardinality=19, embedding_dimension=5, num_time_features=4, is_training=True, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, lags_sequence=[1, 2, 3, 4, 5], moving_average=25, autocorrelation_factor=5):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        return AutoformerConfig(d_model=self.d_model, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, prediction_length=self.prediction_length, context_length=self.context_length, label_length=self.label_length, lags_sequence=self.lags_sequence, num_time_features=self.num_time_features, num_static_categorical_features=1, cardinality=[self.cardinality], embedding_dimension=[self.embedding_dimension], moving_average=self.moving_average)
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]), dim=-1
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim]
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim]
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim]
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim]
            )

    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch


@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state

        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
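# Usage sketch (added; assumes a transformers source checkout so the relative
# test imports resolve): the suite above is normally driven by pytest, e.g.
#   pytest tests/models/autoformer/test_modeling_autoformer.py -k test_config
# With the tiny hyperparameters in AutoformerModelTester, get_config() builds a
# model with d_model=16, 2 encoder/decoder layers and 4 attention heads.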
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def a__ ( lowerCAmelCase : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : List[Any] ):
'''simple docstring'''
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Optional[int] = np.full((len(lowerCAmelCase ), sequence_length, 2) , lowerCAmelCase )
else:
UpperCAmelCase__ : Optional[Any] = np.full((len(lowerCAmelCase ), sequence_length) , lowerCAmelCase )
for i, tensor in enumerate(lowerCAmelCase ):
if padding_side == "right":
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Dict = tensor[:sequence_length]
else:
UpperCAmelCase__ : Tuple = tensor[:sequence_length]
else:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Optional[Any] = tensor[:sequence_length]
else:
UpperCAmelCase__ : int = tensor[:sequence_length]
return out_tensor.tolist()
def a__ ( lowerCAmelCase : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = ord(lowerCAmelCase )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
UpperCAmelCase__ : Optional[Any] = unicodedata.category(lowerCAmelCase )
if cat.startswith("P" ):
return True
return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    """Data collator that dynamically pads the inputs received, as well as the labels."""

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
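# Example (added): padding_tensor right-pads ragged sequences to a fixed
# length with a scalar (or tuple) padding value.
#   >>> padding_tensor([[1, 2], [3]], -1, "right", 4)
#   [[1, 2, -1, -1], [3, -1, -1, -1]]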
"""simple docstring"""
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
A__ : Optional[Any] = logging.get_logger(__name__)
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self , **__UpperCamelCase )-> str:
requires_backends(self , ["bs4"] )
super().__init__(**__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Dict:
UpperCAmelCase__ : Any = []
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : Dict = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
UpperCAmelCase__ : Optional[Any] = parent.find_all(child.name , recursive=__UpperCamelCase )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(__UpperCamelCase ) else next(i for i, s in enumerate(__UpperCamelCase , 1 ) if s is child ) )
UpperCAmelCase__ : List[str] = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath
    def __call__(self, html_strings) -> BatchFeature:
        # Input type checking for clearer error
        valid_strings = False

        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
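# Example (added; output shown is the expected behaviour): the extractor maps
# raw HTML to its text nodes and their xpaths.
#   >>> fe = MarkupLMFeatureExtractor()
#   >>> out = fe("<html><body><p>Hello</p></body></html>")
#   >>> out["nodes"], out["xpaths"]
#   ([['Hello']], [['/html/body/p']])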
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def a__ ( lowerCAmelCase : List[str] ):
'''simple docstring'''
def wrapper(*lowerCAmelCase : Any , **lowerCAmelCase : Tuple ):
UpperCAmelCase__ : Optional[int] = timeit.default_timer()
UpperCAmelCase__ : int = func(*lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase__ : List[Any] = timeit.default_timer() - starttime
return delta
UpperCAmelCase__ : int = func.__name__
return wrapper
def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
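# Usage sketch (added; the feature names are illustrative):
#   feats = datasets.Features({"text": datasets.Value("string"), "score": datasets.Value("int32")})
#   ds = generate_example_dataset("/tmp/dummy.arrow", feats, num_examples=10)
#   assert len(ds) == 10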
"""simple docstring"""
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = 32 , __UpperCamelCase = True , __UpperCamelCase = 1 / 2_55 , __UpperCamelCase = True , __UpperCamelCase = True , __UpperCamelCase = [0.4814_5466, 0.457_8275, 0.4082_1073] , __UpperCamelCase = [0.2686_2954, 0.2613_0258, 0.2757_7711] , __UpperCamelCase = True , __UpperCamelCase=7 , __UpperCamelCase=30 , __UpperCamelCase=4_00 , __UpperCamelCase=3 , )-> List[str]:
UpperCAmelCase__ : List[str] = parent
UpperCAmelCase__ : Any = do_resize
UpperCAmelCase__ : Tuple = size if size is not None else {"shortest_edge": 2_88}
UpperCAmelCase__ : int = size_divisor
UpperCAmelCase__ : Optional[int] = do_rescale
UpperCAmelCase__ : int = rescale_factor
UpperCAmelCase__ : Optional[int] = do_normalize
UpperCAmelCase__ : List[Any] = do_center_crop
UpperCAmelCase__ : Optional[int] = image_mean
UpperCAmelCase__ : Any = image_std
UpperCAmelCase__ : Union[str, Any] = do_pad
UpperCAmelCase__ : str = batch_size
UpperCAmelCase__ : Union[str, Any] = num_channels
UpperCAmelCase__ : str = min_resolution
UpperCAmelCase__ : Optional[Any] = max_resolution
def lowerCAmelCase__ ( self )-> Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to BridgeTowerImageProcessor,
        assuming do_resize is set to True with a scalar size and size_divisor.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
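# Worked example (added): with shortest_edge=288 and size_divisor=32, a
# 400x300 (w x h) PIL image scales by 288/300, giving 384x288; both are
# already multiples of 32, so get_expected_values predicts
# (expected_height, expected_width) == (288, 384).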
"""simple docstring"""
from manim import *
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : str = Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase__ : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Union[str, Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : List[Any] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : int = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = VGroup(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("CPU" , font_size=24 )
UpperCAmelCase__ : Any = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(4 )]
UpperCAmelCase__ : List[str] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Union[str, Any] = Text("GPU" , font_size=24 )
UpperCAmelCase__ : Dict = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : List[str] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("Model" , font_size=24 )
UpperCAmelCase__ : Dict = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : List[str] = []
for i, rect in enumerate(__UpperCamelCase ):
rect.set_stroke(__UpperCamelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
UpperCAmelCase__ : int = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCamelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCamelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__UpperCamelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__UpperCamelCase , buff=0.0 )
self.add(__UpperCamelCase )
cpu_targs.append(__UpperCamelCase )
        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4)
        checkpoint.move_to([3, 0.5, 0])

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())

        step_2 = MarkupText(
            f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.',
            font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2), Write(blue_text))

        self.play(Write(checkpoint_text, run_time=1), Create(checkpoint_rect, run_time=1))

        first_animations = []
        second_animations = []
        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            first_animations.append(GrowFromCenter(target, run_time=1))

            cpu_target = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5])
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
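# Rendering note (added; flags assumed from standard Manim Community usage):
# a scene like this is typically rendered from the command line, e.g.
#   manim -pql this_file.py Stage2
# for a low-quality preview.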
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
A__ : Union[str, Any] = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[Any] = ["""DPTFeatureExtractor"""]
A__ : int = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Tuple = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
A__ : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
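# Usage sketch (added): thanks to the _LazyModule registration, submodules are
# only imported on first attribute access, e.g.
#   from transformers.models.dpt import DPTConfig  # triggers configuration_dpt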
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
A__ : Tuple = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : nn.ModuleList , lowerCAmelCase : nn.ModuleList , lowerCAmelCase : List[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(lowerCAmelCase ) == len(lowerCAmelCase ), F"{len(lowerCAmelCase )} != {len(lowerCAmelCase )}"
dest_layers.load_state_dict(layers_to_copy.state_dict() )
A__ : List[Any] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
A__ : List[Any] = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))


def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    """Used for the --supervise_forward kwarg."""
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers(teacher: Union[str, PreTrainedModel], save_path: Union[str, Path] = "student", e: Union[int, None] = None, d: Union[int, None] = None, copy_first_teacher_layers=False, e_layers_to_copy=None, d_layers_to_copy=None, **extra_config_kwargs) -> Tuple[PreTrainedModel, List[int], List[int]]:
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility

    return student, e_layers_to_copy, d_layers_to_copy


if __name__ == "__main__":
    fire.Fire(create_student_by_copying_alternating_layers)
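# Usage sketch (added; the checkpoint name is only an example): fire exposes
# the function as a CLI, or it can be called directly:
#   student, e_copied, d_copied = create_student_by_copying_alternating_layers(
#       "facebook/bart-large-cnn", save_path="student_dir", e=6, d=3)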
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
A__ : Any = logging.getLogger(__name__)
def a__ ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : int ):
'''simple docstring'''
# save results
if os.path.exists(lowerCAmelCase ):
if os.path.exists(os.path.join(lowerCAmelCase , "config.json" ) ) and os.path.isfile(
os.path.join(lowerCAmelCase , "config.json" ) ):
os.remove(os.path.join(lowerCAmelCase , "config.json" ) )
if os.path.exists(os.path.join(lowerCAmelCase , "pytorch_model.bin" ) ) and os.path.isfile(
os.path.join(lowerCAmelCase , "pytorch_model.bin" ) ):
os.remove(os.path.join(lowerCAmelCase , "pytorch_model.bin" ) )
else:
os.makedirs(lowerCAmelCase )
model.save_pretrained(lowerCAmelCase )
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
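# Sanity check (added): for a uniform distribution over four outcomes,
# entropy(torch.full((4,), 0.25)) == log(4) ≈ 1.3863.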
def print_2d_tensor(tensor):
    """Print a 2D tensor."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False):
    """Compute head attention entropy and head importance scores for each layer/head."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
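# Note (added): the importance score above follows Michel et al. 2019
# (http://arxiv.org/abs/1905.10650): with the head mask requiring gradients,
# |d(loss)/d(mask)| accumulated over the evaluation set measures how much each
# attention head contributes.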
def mask_heads(args, model, eval_dataloader):
    """Mask heads (set some heads to zero) based on importance scores until the score drops below a threshold."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)", current_score, new_head_mask.sum(), new_head_mask.sum() / new_head_mask.numel() * 100, )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune heads (actually remove the head weights) based on the head importance
    scores, as described in Michel et al. (http://arxiv.org/abs/1905.10650)."""
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
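# For reference (explanatory note, not from the original script): `model.prune_heads`
# expects a {layer_index: [head_indices]} mapping, e.g. {0: [2, 5], 11: [0]}, which is
# exactly what the dict comprehension above builds from the 0/1 head mask.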
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in terms of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Fraction of heads to mask at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPTaLMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
    main()
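# Example invocation (illustrative; the script name and data format are assumptions):
#   python run_bertology_lm.py --data_dir tokens.txt --model_name_or_path gpt2 \
#       --output_dir ./pruned_gpt2 --try_masking --masking_threshold 0.9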
| 660 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inpainting(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inpainting_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
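# Note (added for clarity, not part of the original tests): the ONNX Stable Diffusion
# pipelines take a NumPy RandomState instead of a torch.Generator, e.g.
#   generator = np.random.RandomState(0)
# which is what makes the expected slices above deterministic across runs.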
| 660 | 1 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
A__ : Optional[int] = ["""small""", """medium""", """large"""]
A__ : Optional[int] = """lm_head.decoder.weight"""
A__ : Dict = """lm_head.weight"""
def a__ ( lowerCAmelCase : str , lowerCAmelCase : str ):
'''simple docstring'''
UpperCAmelCase__ : Dict = torch.load(lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = d.pop(lowerCAmelCase )
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
torch.save(lowerCAmelCase , os.path.join(lowerCAmelCase , lowerCAmelCase ) )
if __name__ == "__main__":
A__ : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
A__ : Tuple = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
A__ : Tuple = os.path.join(args.dialogpt_path, f"""{MODEL}_ft.pkl""")
A__ : str = f"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
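# Example invocation (illustrative; the script file name is an assumption): with the
# original DialoGPT `*_ft.pkl` checkpoints in ./checkpoints, run
#   python convert_dialogpt.py --dialogpt_path ./checkpoints
# to produce ./DialoGPT-small, ./DialoGPT-medium and ./DialoGPT-large folders.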
| 660 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
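# Illustrative usage (not part of the original module):
#   config = TableTransformerConfig(num_queries=50)
#   config.num_attention_heads  # -> 8, resolved via attribute_map / the property above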
| 660 | 1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
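# Illustrative usage (not part of the original module):
#   config = GitConfig()                 # builds a default GitVisionConfig internally
#   config_dict = config.to_dict()       # the nested vision config is serialized too
#   vision = GitVisionConfig.from_pretrained("microsoft/git-base")  # extracts the "vision_config" sub-dict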
| 660 |
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
    calculate_bleu,
    calculate_rouge,
    chunks,
    lmap,
    load_json,
    parse_numeric_n_bool_cl_kwargs,
    save_json,
    use_task_specific_params,
    write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
):
    """Run evaluation on part of the data for one GPU and save results to {save_dir}/rank_{rank}_output.json."""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name",
        type=str,
        help="like facebook/bart-large-cnn,t5-base, etc.",
        default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout",
        type=int,
        default=600,
        required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
        else:
            shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> List:
    """Concatenate partial results into one list, then sort it by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
    # Unreachable
if __name__ == "__main__":
    # Usage for MT:
    run_generate()
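# Typical multi-GPU launch (illustrative; the script file name is an assumption):
#   python -m torch.distributed.launch --nproc_per_node=8 run_distributed_eval.py \
#       --model_name sshleifer/distilbart-xsum-12-3 --data_dir xsum --save_dir xsum_generations --bs 16 --fp16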
| 660 | 1 |
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
A__ : Union[str, Any] = False, False, False
@dataclass
class Audio:
    """Audio feature: encode audio examples to bytes/paths and decode them back to arrays."""
_A = None
_A = True
_A = True
_A = None
# Automatically constructed
_A = 'dict'
_A = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
_A = field(default='Audio' , init=_lowerCamelCase , repr=_lowerCamelCase )
def __call__( self )-> Dict:
return self.pa_type
def lowerCAmelCase__ ( self , __UpperCamelCase )-> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
if isinstance(A__ , A__ ):
return {"bytes": None, "path": value}
elif isinstance(A__ , A__ ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
UpperCAmelCase__ : Union[str, Any] = BytesIO()
sf.write(A__ , value["array"] , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
if value.get("bytes" ):
# If we already had PCM-byte, we don't have to make "read file, make bytes" (just use it!)
UpperCAmelCase__ : Dict = np.frombuffer(value["bytes"] , dtype=np.intaa ).astype(np.floataa ) / 3_27_67
else:
UpperCAmelCase__ : str = np.memmap(value["path"] , dtype="h" , mode="r" ).astype(np.floataa ) / 3_27_67
UpperCAmelCase__ : Tuple = BytesIO(bytes() )
sf.write(A__ , A__ , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
F"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}." )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None )-> dict:
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(F"An audio sample should have one of 'path' or 'bytes' but both are None in {value}." )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
UpperCAmelCase__ : str = xsplitext(A__ )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
if file is None:
UpperCAmelCase__ : Any = token_per_repo_id or {}
UpperCAmelCase__ : int = path.split("::" )[-1]
try:
UpperCAmelCase__ : List[Any] = string_to_dict(A__ , config.HUB_DATASETS_URL )["repo_id"]
UpperCAmelCase__ : Optional[int] = token_per_repo_id[repo_id]
except (ValueError, KeyError):
UpperCAmelCase__ : Dict = None
with xopen(A__ , "rb" , use_auth_token=A__ ) as f:
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = sf.read(A__ )
else:
UpperCAmelCase__ , UpperCAmelCase__ : int = sf.read(A__ )
UpperCAmelCase__ : Optional[Any] = array.T
if self.mono:
UpperCAmelCase__ : Tuple = librosa.to_mono(A__ )
if self.sampling_rate and self.sampling_rate != sampling_rate:
UpperCAmelCase__ : Union[str, Any] = librosa.resample(A__ , orig_sr=A__ , target_sr=self.sampling_rate )
UpperCAmelCase__ : Dict = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def lowerCAmelCase__ ( self )-> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
def lowerCAmelCase__ ( self , __UpperCamelCase )-> pa.StructArray:
if pa.types.is_string(storage.type ):
UpperCAmelCase__ : int = pa.array([None] * len(A__ ) , type=pa.binary() )
UpperCAmelCase__ : List[Any] = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCAmelCase__ : List[Any] = pa.array([None] * len(A__ ) , type=pa.string() )
UpperCAmelCase__ : str = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
UpperCAmelCase__ : int = pa.array([Audio().encode_example(A__ ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
UpperCAmelCase__ : str = storage.field("bytes" )
else:
UpperCAmelCase__ : str = pa.array([None] * len(A__ ) , type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
UpperCAmelCase__ : List[Any] = storage.field("path" )
else:
UpperCAmelCase__ : Optional[Any] = pa.array([None] * len(A__ ) , type=pa.string() )
UpperCAmelCase__ : Optional[int] = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
return array_cast(A__ , self.pa_type )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(__UpperCamelCase ):
with xopen(A__ , "rb" ) as f:
UpperCAmelCase__ : Optional[int] = f.read()
return bytes_
UpperCAmelCase__ : str = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
UpperCAmelCase__ : List[Any] = pa.array(
[os.path.basename(A__ ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
UpperCAmelCase__ : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
return array_cast(A__ , self.pa_type )
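# Illustrative usage (not part of the original module; assumes the surrounding `datasets` API):
#   from datasets import Dataset, Features
#   ds = Dataset.from_dict({"audio": ["path/to/file.wav"]})
#   ds = ds.cast(Features({"audio": Audio(sampling_rate=16_000)}))
#   ds[0]["audio"]  # -> {"path": ..., "array": np.ndarray, "sampling_rate": 16000}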
| 700 |
"""simple docstring"""
from timeit import timeit
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if number < 0:
raise ValueError("the value of input must not be negative" )
UpperCAmelCase__ : Tuple = 0
while number:
number &= number - 1
result += 1
return result
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if number < 0:
raise ValueError("the value of input must not be negative" )
UpperCAmelCase__ : Union[str, Any] = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def a__ ( ):
'''simple docstring'''
def do_benchmark(lowerCAmelCase : int ) -> None:
UpperCAmelCase__ : Dict = "import __main__ as z"
print(F"Benchmark when {number = }:" )
print(F"{get_set_bits_count_using_modulo_operator(lowerCAmelCase ) = }" )
UpperCAmelCase__ : Tuple = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=lowerCAmelCase )
print(F"timeit() runs in {timing} seconds" )
print(F"{get_set_bits_count_using_brian_kernighans_algorithm(lowerCAmelCase ) = }" )
UpperCAmelCase__ : Any = timeit(
"z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=lowerCAmelCase , )
print(F"timeit() runs in {timing} seconds" )
for number in (25, 37, 58, 0):
do_benchmark(lowerCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
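# Quick sanity check (illustrative, not part of the original script): both
# implementations agree with Python's built-in bit counting, e.g.
#   assert get_set_bits_count_using_modulo_operator(25) == bin(25).count("1") == 3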
| 660 | 0 |
"""simple docstring"""
def a__ ( lowerCAmelCase : Optional[Any] ):
UpperCAmelCase__ : Any = []
UpperCAmelCase__ : str = set({"(", "[", "{"} )
UpperCAmelCase__ : Optional[int] = set({")", "]", "}"} )
UpperCAmelCase__ : List[Any] = {"{": "}", "[": "]", "(": ")"}
for i in range(len(__A ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(__A ) == 0 or (len(__A ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(__A ) == 0
def a__ ( ):
UpperCAmelCase__ : Optional[int] = input("Enter sequence of brackets: " )
if is_balanced(__A ):
print(__A , "is balanced" )
else:
print(__A , "is not balanced" )
if __name__ == "__main__":
main()
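# Example behaviour (illustrative):
#   is_balanced("([]{})")  # -> True
#   is_balanced("([)]")    # -> False: ")" arrives while "[" is on top of the stack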
| 701 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 660 | 0 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizer(PreTrainedTokenizer):
    """Construct an ALBERT tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
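# Illustrative usage (not part of the original module):
#   tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
#   tokenizer.tokenize("Hello World")  # lower-cased and accent-stripped, e.g. ['▁hello', '▁world']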
| 702 |
"""simple docstring"""
def a__ ( lowerCAmelCase : list , lowerCAmelCase : list ):
'''simple docstring'''
_validate_point(lowerCAmelCase )
_validate_point(lowerCAmelCase )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(lowerCAmelCase , lowerCAmelCase ) ) )
def a__ ( lowerCAmelCase : list[float] ):
'''simple docstring'''
if point:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
for item in point:
if not isinstance(lowerCAmelCase , (int, float) ):
UpperCAmelCase__ : Tuple = (
"Expected a list of numbers as input, found "
F"{type(lowerCAmelCase ).__name__}"
)
raise TypeError(lowerCAmelCase )
else:
UpperCAmelCase__ : Dict = F"Expected a list of numbers as input, found {type(lowerCAmelCase ).__name__}"
raise TypeError(lowerCAmelCase )
else:
raise ValueError("Missing an input" )
def a__ ( lowerCAmelCase : list , lowerCAmelCase : list ):
'''simple docstring'''
_validate_point(lowerCAmelCase )
_validate_point(lowerCAmelCase )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(lowerCAmelCase , lowerCAmelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
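# For contrast (illustrative): the Euclidean (L2) counterpart of the functions above
# would be
#   math.sqrt(sum((a - b) ** 2 for a, b in zip(point_a, point_b)))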
| 660 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    """Wraps a Chinese-CLIP image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
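# Illustrative usage (not part of the original module; checkpoint name assumed):
#   from transformers import ChineseCLIPProcessor
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text=["一张猫的照片"], images=image, return_tensors="pt")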
| 703 |
"""simple docstring"""
import math
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a__ ( lowerCAmelCase : int = 1_0001 ):
'''simple docstring'''
try:
UpperCAmelCase__ : List[str] = int(lowerCAmelCase )
except (TypeError, ValueError):
raise TypeError("Parameter nth must be int or castable to int." ) from None
if nth <= 0:
raise ValueError("Parameter nth must be greater than or equal to one." )
UpperCAmelCase__ : list[int] = []
UpperCAmelCase__ : str = 2
while len(lowerCAmelCase ) < nth:
if is_prime(lowerCAmelCase ):
primes.append(lowerCAmelCase )
num += 1
else:
num += 1
return primes[len(lowerCAmelCase ) - 1]
if __name__ == "__main__":
print(f"""{solution() = }""")
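# Why 6k +/- 1 works (explanatory note, not in the original): any integer can be
# written as 6k + r with r in {0, 1, 2, 3, 4, 5}; r in {0, 2, 3, 4} is divisible by
# 2 or 3, so after checking 2 and 3 only candidates of the form 6k - 1 and 6k + 1 remain.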
| 660 | 0 |
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))

    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)

    To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation:
    input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
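# Shape summary (explanatory note, not in the original): each tensor dataset holds
#   input_ids     (n_batch, 2, input_len)   both continuations per story
#   mc_token_ids  (n_batch, 2)              position of the clf token per continuation
#   lm_labels     (n_batch, 2, input_len)   -100 marks positions ignored by the LM loss
#   mc_labels     (n_batch,)                index of the correct continuation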
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help=(
            "If > 0: set total number of training steps to perform. Override num_train_epochs."
        ),
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    # Load tokenizer and model
    # These loading functions also add new tokens and embeddings, called `special tokens`.
    # The new embeddings will be fine-tuned on the RocStories dataset.
UpperCAmelCase__ : Union[str, Any] = ['_start_', '_delimiter_', '_classify_']
UpperCAmelCase__ : Optional[int] = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(_lowercase )
UpperCAmelCase__ : Optional[int] = tokenizer.convert_tokens_to_ids(_lowercase )
UpperCAmelCase__ : Tuple = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(_lowercase ) )
model.to(_lowercase )
# Load and encode the datasets
def tokenize_and_encode(lowerCAmelCase : List[Any] ):
if isinstance(_lowercase , _lowercase ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_lowercase ) )
elif isinstance(_lowercase , _lowercase ):
return obj
return [tokenize_and_encode(_lowercase ) for o in obj]
logger.info("Encoding dataset..." )
UpperCAmelCase__ : Any = load_rocstories_dataset(args.train_dataset )
UpperCAmelCase__ : List[str] = load_rocstories_dataset(args.eval_dataset )
UpperCAmelCase__ : Dict = (train_dataset, eval_dataset)
UpperCAmelCase__ : Optional[int] = tokenize_and_encode(_lowercase )
# Compute the max input length for the Transformer
UpperCAmelCase__ : Optional[Any] = model.config.n_positions // 2 - 2
UpperCAmelCase__ : List[Any] = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
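    # the trailing +3 accounts for the start, delimiter and classification special tokens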
UpperCAmelCase__ : List[str] = min(_lowercase , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
UpperCAmelCase__ : Optional[int] = pre_process_datasets(_lowercase , _lowercase , _lowercase , *_lowercase )
UpperCAmelCase__ : Union[str, Any] = tensor_datasets[0], tensor_datasets[1]
UpperCAmelCase__ : Any = TensorDataset(*_lowercase )
UpperCAmelCase__ : Optional[Any] = RandomSampler(_lowercase )
UpperCAmelCase__ : Union[str, Any] = DataLoader(_lowercase , sampler=_lowercase , batch_size=args.train_batch_size )
UpperCAmelCase__ : Optional[int] = TensorDataset(*_lowercase )
UpperCAmelCase__ : List[Any] = SequentialSampler(_lowercase )
UpperCAmelCase__ : Optional[Any] = DataLoader(_lowercase , sampler=_lowercase , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
UpperCAmelCase__ : Tuple = args.max_steps
UpperCAmelCase__ : List[str] = args.max_steps // (len(_lowercase ) // args.gradient_accumulation_steps) + 1
else:
UpperCAmelCase__ : Dict = len(_lowercase ) // args.gradient_accumulation_steps * args.num_train_epochs
UpperCAmelCase__ : Optional[int] = list(model.named_parameters() )
UpperCAmelCase__ : Any = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
UpperCAmelCase__ : Tuple = [
{
'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'weight_decay': args.weight_decay,
},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
]
UpperCAmelCase__ : Tuple = AdamW(_lowercase , lr=args.learning_rate , eps=args.adam_epsilon )
UpperCAmelCase__ : Optional[int] = get_linear_schedule_with_warmup(
_lowercase , num_warmup_steps=args.warmup_steps , num_training_steps=_lowercase )
if args.do_train:
UpperCAmelCase__ : int = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc="Epoch" ):
UpperCAmelCase__ : Optional[Any] = 0
UpperCAmelCase__ : Union[str, Any] = 0
UpperCAmelCase__ : Dict = tqdm(_lowercase , desc="Training" )
for step, batch in enumerate(_lowercase ):
UpperCAmelCase__ : Dict = tuple(t.to(_lowercase ) for t in batch )
UpperCAmelCase__ : Dict = batch
UpperCAmelCase__ : Optional[Any] = model(_lowercase , mc_token_ids=_lowercase , lm_labels=_lowercase , mc_labels=_lowercase )
UpperCAmelCase__ : Any = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
UpperCAmelCase__ : str = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
UpperCAmelCase__ : Dict = 'Training loss: {:.2e} lr: {:.2e}'.format(_lowercase , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
UpperCAmelCase__ : List[str] = model.module if hasattr(_lowercase , "module" ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
UpperCAmelCase__ : Optional[int] = os.path.join(args.output_dir , _lowercase )
UpperCAmelCase__ : List[Any] = os.path.join(args.output_dir , _lowercase )
torch.save(model_to_save.state_dict() , _lowercase )
model_to_save.config.to_json_file(_lowercase )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
UpperCAmelCase__ : List[str] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
UpperCAmelCase__ : Dict = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(_lowercase )
if args.do_eval:
model.eval()
UpperCAmelCase__ : List[Any] = 0, 0
UpperCAmelCase__ : List[str] = 0, 0
for batch in tqdm(_lowercase , desc="Evaluating" ):
UpperCAmelCase__ : str = tuple(t.to(_lowercase ) for t in batch )
UpperCAmelCase__ : Any = batch
with torch.no_grad():
UpperCAmelCase__ : Tuple = model(
_lowercase , mc_token_ids=_lowercase , lm_labels=_lowercase , mc_labels=_lowercase )
UpperCAmelCase__ : List[str] = mc_logits.detach().cpu().numpy()
UpperCAmelCase__ : Any = mc_labels.to("cpu" ).numpy()
UpperCAmelCase__ : int = accuracy(_lowercase , _lowercase )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
UpperCAmelCase__ : Tuple = eval_loss / nb_eval_steps
UpperCAmelCase__ : Optional[int] = eval_accuracy / nb_eval_examples
UpperCAmelCase__ : Tuple = tr_loss / nb_tr_steps if args.do_train else None
UpperCAmelCase__ : List[Any] = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
UpperCAmelCase__ : Optional[Any] = os.path.join(args.output_dir , "eval_results.txt" )
with open(_lowercase , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , _lowercase , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
if __name__ == "__main__":
main()
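# Example invocation (the script filename and dataset paths are hypothetical
# placeholders; the flags match the argparse options defined above):
#   python run_openai_gpt.py \
#       --model_name openai-gpt \
#       --do_train --do_eval \
#       --train_dataset data/cloze_test_val__spring2016.csv \
#       --eval_dataset data/cloze_test_test__spring2016.csv \
#       --output_dir out/rocstories \
#       --train_batch_size 16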
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=30 , __UpperCamelCase=2 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=10 , __UpperCamelCase=0.02 , __UpperCamelCase=3 , __UpperCamelCase=0.6 , __UpperCamelCase=None , )-> List[Any]:
UpperCAmelCase__ : str = parent
UpperCAmelCase__ : Optional[Any] = batch_size
UpperCAmelCase__ : Any = image_size
UpperCAmelCase__ : Dict = patch_size
UpperCAmelCase__ : Optional[Any] = num_channels
UpperCAmelCase__ : Union[str, Any] = is_training
UpperCAmelCase__ : Any = use_labels
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : Dict = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : List[Any] = intermediate_size
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : List[Any] = hidden_dropout_prob
UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[Any] = type_sequence_label_size
UpperCAmelCase__ : Union[str, Any] = initializer_range
UpperCAmelCase__ : int = mask_ratio
UpperCAmelCase__ : Tuple = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded up
        # (we add 1 for the [CLS] token)
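        # e.g. with the defaults above: num_patches = (30 // 2) ** 2 = 225 and
        # seq_length = ceil(0.4 * (225 + 1)) = 91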
UpperCAmelCase__ : int = (image_size // patch_size) ** 2
UpperCAmelCase__ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Optional[Any] = None
if self.use_labels:
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self )-> int:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = ViTMAEModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Optional[int]:
UpperCAmelCase__ : List[Any] = ViTMAEForPreTraining(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : Optional[int] = model(__UpperCamelCase )
UpperCAmelCase__ : List[str] = (self.image_size // self.patch_size) ** 2
UpperCAmelCase__ : List[str] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCAmelCase__ : Dict = 1
UpperCAmelCase__ : str = ViTMAEForPreTraining(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
UpperCAmelCase__ : List[str] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Tuple = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = config_and_inputs
UpperCAmelCase__ : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_A = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
_A = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
_A = False
_A = False
_A = False
_A = False
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : Any = ViTMAEModelTester(self )
UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def lowerCAmelCase__ ( self )-> int:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def lowerCAmelCase__ ( self )-> Dict:
pass
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[str] = model_class(__UpperCamelCase )
UpperCAmelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Dict = [*signature.parameters.keys()]
UpperCAmelCase__ : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Dict:
# make masks reproducible
np.random.seed(2 )
UpperCAmelCase__ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
UpperCAmelCase__ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase__ : str = torch.from_numpy(__UpperCamelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCAmelCase__ : Optional[Any] = pt_noise
super().check_pt_tf_models(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[Any] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
UpperCAmelCase__ : Optional[Any] = outputs[0].cpu().numpy()
UpperCAmelCase__ : Union[str, Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = model_class.from_pretrained(__UpperCamelCase )
model.to(__UpperCamelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : List[Any] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
# Make sure we don't have nans
UpperCAmelCase__ : Tuple = after_outputs[0].cpu().numpy()
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : str = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__UpperCamelCase , 1E-5 )
    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results." )
def lowerCAmelCase__ ( self )-> List[str]:
pass
    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results." )
def lowerCAmelCase__ ( self )-> Any:
pass
    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results." )
def lowerCAmelCase__ ( self )-> Optional[Any]:
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def lowerCAmelCase__ ( self )-> List[Any]:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
pass
@slow
def lowerCAmelCase__ ( self )-> Union[str, Any]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Tuple = ViTMAEModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase__ ( self )-> List[Any]:
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self )-> Optional[int]:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
UpperCAmelCase__ : Any = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(__UpperCamelCase )
UpperCAmelCase__ : Tuple = self.default_image_processor
UpperCAmelCase__ : List[Any] = prepare_img()
UpperCAmelCase__ : Optional[Any] = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCAmelCase__ : List[Any] = ViTMAEConfig()
UpperCAmelCase__ : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCAmelCase__ : Optional[int] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : List[str] = model(**__UpperCamelCase , noise=torch.from_numpy(__UpperCamelCase ).to(device=__UpperCamelCase ) )
# verify the logits
UpperCAmelCase__ : str = torch.Size((1, 1_96, 7_68) )
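        # 196 = (224 / 16) ** 2 patches for the vit-mae-base checkpoint;
        # 768 = 16 * 16 * 3 predicted pixel values per patch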
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(__UpperCamelCase ) , atol=1E-4 ) )
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("""0.8.3"""):
raise Exception("""requires gluonnlp == 0.8.3""")
if version.parse(mx.__version__) != version.parse("""1.5.0"""):
raise Exception("""requires mxnet == 1.5.0""")
logging.set_verbosity_info()
A__ : Optional[int] = logging.get_logger(__name__)
A__ : int = "The Nymphenburg Palace is a beautiful palace in Munich!"
def a__ ( lowerCAmelCase : str , lowerCAmelCase : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = {
"attention_cell": "multi_head",
"num_layers": 4,
"units": 1024,
"hidden_size": 768,
"max_length": 512,
"num_heads": 8,
"scaled": True,
"dropout": 0.1,
"use_residual": True,
"embed_size": 1024,
"embed_dropout": 0.1,
"word_embed": None,
"layer_norm_eps": 1E-5,
"token_type_vocab_size": 2,
}
UpperCAmelCase__ : List[str] = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
UpperCAmelCase__ : List[str] = BERTEncoder(
attention_cell=predefined_args["attention_cell"] , num_layers=predefined_args["num_layers"] , units=predefined_args["units"] , hidden_size=predefined_args["hidden_size"] , max_length=predefined_args["max_length"] , num_heads=predefined_args["num_heads"] , scaled=predefined_args["scaled"] , dropout=predefined_args["dropout"] , output_attention=_A , output_all_encodings=_A , use_residual=predefined_args["use_residual"] , activation=predefined_args.get("activation" , "gelu" ) , layer_norm_eps=predefined_args.get("layer_norm_eps" , _A ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
UpperCAmelCase__ : int = "openwebtext_ccnews_stories_books_cased"
# Specify download folder to Gluonnlp's vocab
UpperCAmelCase__ : List[Any] = os.path.join(get_home_dir() , "models" )
UpperCAmelCase__ : List[str] = _load_vocab(_A , _A , _A , cls=_A )
UpperCAmelCase__ : str = nlp.model.BERTModel(
_A , len(_A ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=_A , use_token_type_embed=_A , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=_A , use_decoder=_A , )
original_bort.load_parameters(_A , cast_dtype=_A , ignore_extra=_A )
UpperCAmelCase__ : List[Any] = original_bort._collect_params_with_prefix()
# Build our config 🤗
UpperCAmelCase__ : int = {
"architectures": ["BertForMaskedLM"],
"attention_probs_dropout_prob": predefined_args["dropout"],
"hidden_act": "gelu",
"hidden_dropout_prob": predefined_args["dropout"],
"hidden_size": predefined_args["embed_size"],
"initializer_range": 0.02,
"intermediate_size": predefined_args["hidden_size"],
"layer_norm_eps": predefined_args["layer_norm_eps"],
"max_position_embeddings": predefined_args["max_length"],
"model_type": "bort",
"num_attention_heads": predefined_args["num_heads"],
"num_hidden_layers": predefined_args["num_layers"],
"pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
"type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
"vocab_size": len(_A ),
}
UpperCAmelCase__ : Optional[Any] = BertConfig.from_dict(_A )
UpperCAmelCase__ : Optional[Any] = BertForMaskedLM(_A )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
    # Helper function to convert MXNet arrays to PyTorch parameters
def to_torch(lowerCAmelCase : List[Any] ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(lowerCAmelCase : Any , lowerCAmelCase : str ):
UpperCAmelCase__ : List[Any] = hf_param.shape
UpperCAmelCase__ : int = to_torch(params[gluon_param] )
UpperCAmelCase__ : Optional[int] = gluon_param.shape
assert (
shape_hf == shape_gluon
), F"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"
return gluon_param
UpperCAmelCase__ : List[Any] = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , "word_embed.0.weight" )
UpperCAmelCase__ : Optional[int] = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , "encoder.position_weight" )
UpperCAmelCase__ : List[str] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , "encoder.layer_norm.beta" )
UpperCAmelCase__ : Any = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , "encoder.layer_norm.gamma" )
    # Inspired by the RoBERTa conversion script, we simply zero them out (Bort does not use them)
UpperCAmelCase__ : List[Any] = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
UpperCAmelCase__ : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
UpperCAmelCase__ : BertSelfAttention = layer.attention.self
UpperCAmelCase__ : Tuple = check_and_map_params(
self_attn.key.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_key.bias" )
UpperCAmelCase__ : int = check_and_map_params(
self_attn.key.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_key.weight" )
UpperCAmelCase__ : Union[str, Any] = check_and_map_params(
self_attn.query.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_query.bias" )
UpperCAmelCase__ : Optional[int] = check_and_map_params(
self_attn.query.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_query.weight" )
UpperCAmelCase__ : List[Any] = check_and_map_params(
self_attn.value.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_value.bias" )
UpperCAmelCase__ : List[Any] = check_and_map_params(
self_attn.value.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_value.weight" )
# self attention output
UpperCAmelCase__ : BertSelfOutput = layer.attention.output
UpperCAmelCase__ : Dict = check_and_map_params(
self_output.dense.bias , F"encoder.transformer_cells.{i}.proj.bias" )
UpperCAmelCase__ : str = check_and_map_params(
self_output.dense.weight , F"encoder.transformer_cells.{i}.proj.weight" )
UpperCAmelCase__ : Tuple = check_and_map_params(
self_output.LayerNorm.bias , F"encoder.transformer_cells.{i}.layer_norm.beta" )
UpperCAmelCase__ : Optional[int] = check_and_map_params(
self_output.LayerNorm.weight , F"encoder.transformer_cells.{i}.layer_norm.gamma" )
# intermediate
UpperCAmelCase__ : BertIntermediate = layer.intermediate
UpperCAmelCase__ : Union[str, Any] = check_and_map_params(
intermediate.dense.bias , F"encoder.transformer_cells.{i}.ffn.ffn_1.bias" )
UpperCAmelCase__ : Optional[int] = check_and_map_params(
intermediate.dense.weight , F"encoder.transformer_cells.{i}.ffn.ffn_1.weight" )
# output
UpperCAmelCase__ : BertOutput = layer.output
UpperCAmelCase__ : str = check_and_map_params(
bert_output.dense.bias , F"encoder.transformer_cells.{i}.ffn.ffn_2.bias" )
UpperCAmelCase__ : Tuple = check_and_map_params(
bert_output.dense.weight , F"encoder.transformer_cells.{i}.ffn.ffn_2.weight" )
UpperCAmelCase__ : Tuple = check_and_map_params(
bert_output.LayerNorm.bias , F"encoder.transformer_cells.{i}.ffn.layer_norm.beta" )
UpperCAmelCase__ : int = check_and_map_params(
bert_output.LayerNorm.weight , F"encoder.transformer_cells.{i}.ffn.layer_norm.gamma" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
UpperCAmelCase__ : List[str] = RobertaTokenizer.from_pretrained("roberta-base" )
UpperCAmelCase__ : Tuple = tokenizer.encode_plus(_A )["input_ids"]
# Get gluon output
UpperCAmelCase__ : Union[str, Any] = mx.nd.array([input_ids] )
UpperCAmelCase__ : Optional[Any] = original_bort(inputs=_A , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(_A )
UpperCAmelCase__ : List[Any] = BertModel.from_pretrained(_A )
hf_bort_model.eval()
UpperCAmelCase__ : int = tokenizer.encode_plus(_A , return_tensors="pt" )
UpperCAmelCase__ : Any = hf_bort_model(**_A )[0]
UpperCAmelCase__ : Dict = output_gluon[0].asnumpy()
UpperCAmelCase__ : Optional[Any] = output_hf[0].detach().numpy()
UpperCAmelCase__ : Union[str, Any] = np.max(np.abs(hf_layer - gluon_layer ) ).item()
UpperCAmelCase__ : List[Any] = np.allclose(_A , _A , atol=1E-3 )
    if success:
        print("✔️ Both models output the same tensors" )
    else:
        print("❌ Both models do **NOT** output the same tensors" )
        print("Absolute difference is:" , _A )
if __name__ == "__main__":
A__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
A__ : str = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
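# Example invocation (the script filename and paths are hypothetical placeholders):
#   python convert_bort_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path bort.params \
#       --pytorch_dump_folder_path ./bort-pytorch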
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _lowercase :
'''simple docstring'''
_A = 42
# setable values
_A = 42
_A = 42
_A = None
@classmethod
def lowerCAmelCase__ ( cls , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
return cls(common=__UpperCamelCase , init_noise_sigma=__UpperCamelCase , timesteps=__UpperCamelCase )
@dataclass
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
_A = [e.name for e in FlaxKarrasDiffusionSchedulers]
_A = 42
@property
def lowerCAmelCase__ ( self )-> Optional[int]:
return True
@register_to_config
def __init__( self , __UpperCamelCase = 10_00 , __UpperCamelCase = 0.0001 , __UpperCamelCase = 0.02 , __UpperCamelCase = "linear" , __UpperCamelCase = None , __UpperCamelCase = "fixed_small" , __UpperCamelCase = True , __UpperCamelCase = "epsilon" , __UpperCamelCase = jnp.floataa , )-> List[str]:
UpperCAmelCase__ : int = dtype
def lowerCAmelCase__ ( self , __UpperCamelCase = None )-> DDPMSchedulerState:
if common is None:
UpperCAmelCase__ : int = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
UpperCAmelCase__ : Tuple = jnp.array(1.0 , dtype=self.dtype )
UpperCAmelCase__ : Tuple = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__UpperCamelCase , init_noise_sigma=__UpperCamelCase , timesteps=__UpperCamelCase , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None )-> jnp.ndarray:
return sample
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = () )-> DDPMSchedulerState:
UpperCAmelCase__ : Dict = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by the ratio,
        # rounding to avoid issues when num_inference_steps is a power of 3
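        # e.g. num_train_timesteps=1000 and num_inference_steps=50 give
        # step_ratio=20 and timesteps=[980, 960, ..., 20, 0]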
UpperCAmelCase__ : Optional[int] = (jnp.arange(0 , __UpperCamelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__UpperCamelCase , timesteps=__UpperCamelCase , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None )-> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        # For t > 0, compute the predicted posterior variance β̃t (see formulas (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
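        # posterior variance: beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t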
UpperCAmelCase__ : Any = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
UpperCAmelCase__ : Union[str, Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
UpperCAmelCase__ : Dict = jnp.clip(__UpperCamelCase , a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
UpperCAmelCase__ : Tuple = jnp.log(jnp.clip(__UpperCamelCase , a_min=1E-20 ) )
elif variance_type == "fixed_large":
UpperCAmelCase__ : Union[str, Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
UpperCAmelCase__ : Optional[int] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
UpperCAmelCase__ : List[str] = variance
UpperCAmelCase__ : Union[str, Any] = state.common.betas[t]
UpperCAmelCase__ : Optional[int] = (predicted_variance + 1) / 2
UpperCAmelCase__ : Any = frac * max_log + (1 - frac) * min_log
return variance
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = True , )-> Union[FlaxDDPMSchedulerOutput, Tuple]:
UpperCAmelCase__ : List[str] = timestep
if key is None:
UpperCAmelCase__ : int = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = jnp.split(__UpperCamelCase , sample.shape[1] , axis=1 )
else:
UpperCAmelCase__ : Optional[Any] = None
# 1. compute alphas, betas
UpperCAmelCase__ : Union[str, Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
UpperCAmelCase__ : Union[str, Any] = 1 - alpha_prod_t
UpperCAmelCase__ : Tuple = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCAmelCase__ : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCAmelCase__ : Any = model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
            raise ValueError(
                F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                "or `v_prediction` for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCAmelCase__ : List[Any] = jnp.clip(__UpperCamelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : List[str] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
UpperCAmelCase__ : List[Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
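        #   mu_t = (sqrt(alpha_bar_{t-1}) * beta_t / (1 - alpha_bar_t)) * x_0
        #        + (sqrt(alpha_t) * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)) * x_t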
UpperCAmelCase__ : Tuple = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
UpperCAmelCase__ : Any = jax.random.split(__UpperCamelCase , num=1 )
UpperCAmelCase__ : int = jax.random.normal(__UpperCamelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__UpperCamelCase , __UpperCamelCase , predicted_variance=__UpperCamelCase ) ** 0.5) * noise
UpperCAmelCase__ : Dict = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
UpperCAmelCase__ : Any = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__UpperCamelCase , state=__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> jnp.ndarray:
return add_noise_common(state.common , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> jnp.ndarray:
return get_velocity_common(state.common , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def __len__( self )-> Tuple:
return self.config.num_train_timesteps
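# Minimal usage sketch, assuming the scheduler above is exported as
# FlaxDDPMScheduler and keeps the upstream diffusers method names
# (create_state / set_timesteps / step):
#   scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps=50)
#   out = scheduler.step(state, model_output, t, sample, key=jax.random.PRNGKey(0))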
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowercase ( __lowerCamelCase ):
'''simple docstring'''
_A = ''
_A = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
_A = None # compression type in fsspec. ex: "gzip"
_A = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self , __UpperCamelCase = "" , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase )-> int:
super().__init__(self , **UpperCAmelCase_ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
UpperCAmelCase__ : int = fsspec.open(
UpperCAmelCase_ , mode="rb" , protocol=UpperCAmelCase_ , compression=self.compression , client_kwargs={
"requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459
"trust_env": True, # Enable reading proxy env variables.
**(target_options or {}).pop("client_kwargs" , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
UpperCAmelCase__ : Tuple = os.path.basename(self.file.path.split("::" )[0] )
UpperCAmelCase__ : Union[str, Any] = (
self.compressed_name[: self.compressed_name.rindex("." )]
if '.' in self.compressed_name
else self.compressed_name
)
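        # e.g. "file.txt.gz" -> "file.txt"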
UpperCAmelCase__ : str = None
@classmethod
def lowerCAmelCase__ ( cls , __UpperCamelCase )-> List[str]:
# compressed file paths are always relative to the archive root
return super()._strip_protocol(UpperCAmelCase_ ).lstrip("/" )
def lowerCAmelCase__ ( self )-> Dict:
if self.dir_cache is None:
UpperCAmelCase__ : Tuple = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name}
UpperCAmelCase__ : Dict = {f['name']: f}
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[str]:
return self.file.open().read()
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = "rb" , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=None , **__UpperCamelCase , )-> Optional[Any]:
UpperCAmelCase__ : str = self._strip_protocol(UpperCAmelCase_ )
if mode != "rb":
raise ValueError(F"Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'" )
return self.file.open()
class _lowercase ( __lowerCamelCase ):
'''simple docstring'''
_A = 'bz2'
_A = 'bz2'
_A = '.bz2'
class _lowercase ( __lowerCamelCase ):
'''simple docstring'''
_A = 'gzip'
_A = 'gzip'
_A = '.gz'
class _lowercase ( __lowerCamelCase ):
'''simple docstring'''
_A = 'lz4'
_A = 'lz4'
_A = '.lz4'
class _lowercase ( __lowerCamelCase ):
'''simple docstring'''
_A = 'xz'
_A = 'xz'
_A = '.xz'
class _lowercase ( __lowerCamelCase ):
'''simple docstring'''
_A = 'zstd'
_A = 'zstd'
_A = '.zst'
def __init__( self , __UpperCamelCase , __UpperCamelCase = "rb" , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = DEFAULT_BLOCK_SIZE , **__UpperCamelCase , )-> str:
super().__init__(
fo=UpperCAmelCase_ , mode=UpperCAmelCase_ , target_protocol=UpperCAmelCase_ , target_options=UpperCAmelCase_ , block_size=UpperCAmelCase_ , **UpperCAmelCase_ , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
UpperCAmelCase__ : str = self.file.__enter__
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase )-> Dict:
UpperCAmelCase__ : str = file_
def __enter__( self )-> Optional[Any]:
self._file.__enter__()
return self
def __exit__( self , *__UpperCamelCase , **__UpperCamelCase )-> int:
self._file.__exit__(*UpperCAmelCase_ , **UpperCAmelCase_ )
def __iter__( self )-> Dict:
return iter(self._file )
def lowerCAmelCase__ ( self )-> int:
return next(self._file )
def __getattr__( self , __UpperCamelCase )-> Union[str, Any]:
return getattr(self._file , UpperCAmelCase_ )
def fixed_enter(*__UpperCamelCase , **__UpperCamelCase ):
return WrappedFile(_enter(*UpperCAmelCase_ , **UpperCAmelCase_ ) )
UpperCAmelCase__ : str = fixed_enter
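# Usage sketch: fsspec resolves these compression protocols by chaining URLs with
# "::" (the URL below is a hypothetical example of the pattern described above):
#   with fsspec.open("gzip://data.txt::https://example.com/data.txt.gz") as f:
#       text = f.read()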
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ''
_A = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> str:
super().__init__(self , **__UpperCamelCase )
UpperCAmelCase__ : int = repo_info
UpperCAmelCase__ : Optional[int] = token
UpperCAmelCase__ : Optional[Any] = None
def lowerCAmelCase__ ( self )-> Optional[Any]:
if self.dir_cache is None:
UpperCAmelCase__ : str = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
UpperCAmelCase__ : str = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(__UpperCamelCase ): {"name": str(__UpperCamelCase ), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = "rb" , **__UpperCamelCase , )-> List[Any]:
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
UpperCAmelCase__ : Union[str, Any] = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
def lowerCAmelCase__ ( self , __UpperCamelCase , **__UpperCamelCase )-> List[str]:
self._get_dirs()
UpperCAmelCase__ : Union[str, Any] = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=False , **__UpperCamelCase )-> str:
self._get_dirs()
UpperCAmelCase__ : str = PurePosixPath(path.strip("/" ) )
UpperCAmelCase__ : Optional[Any] = {}
for p, f in self.dir_cache.items():
UpperCAmelCase__ : Optional[int] = PurePosixPath(p.strip("/" ) )
UpperCAmelCase__ : Dict = p.parent
if root == path:
UpperCAmelCase__ : Tuple = f
UpperCAmelCase__ : List[Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out )
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
A__ : Any = None
A__ : Optional[int] = logging.get_logger(__name__)
A__ : List[str] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
A__ : Dict = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
A__ : Tuple = {
"facebook/mbart-large-en-ro": 1_024,
"facebook/mbart-large-cc25": 1_024,
}
# fmt: off
A__ : Optional[Any] = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class _lowercase ( UpperCAmelCase_ ):
'''simple docstring'''
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = ['input_ids', 'attention_mask']
_A = MBartTokenizer
_A = []
_A = []
def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<s>" , __UpperCamelCase="<unk>" , __UpperCamelCase="<pad>" , __UpperCamelCase="<mask>" , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , **__UpperCamelCase , )-> List[str]:
        # The mask token behaves like a normal word, i.e. it includes the space before it
UpperCAmelCase__ : Union[str, Any] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
super().__init__(
vocab_file=_lowercase , tokenizer_file=_lowercase , bos_token=_lowercase , eos_token=_lowercase , sep_token=_lowercase , cls_token=_lowercase , unk_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , src_lang=_lowercase , tgt_lang=_lowercase , additional_special_tokens=_lowercase , **_lowercase , )
UpperCAmelCase__ : List[Any] = vocab_file
UpperCAmelCase__ : List[Any] = False if not self.vocab_file else True
UpperCAmelCase__ : Optional[int] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
UpperCAmelCase__ : Tuple = {
lang_code: self.convert_tokens_to_ids(_lowercase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
UpperCAmelCase__ : List[Any] = src_lang if src_lang is not None else 'en_XX'
UpperCAmelCase__ : int = self.convert_tokens_to_ids(self._src_lang )
UpperCAmelCase__ : Any = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def lowerCAmelCase__ ( self )-> str:
return self._src_lang
@src_lang.setter
def lowerCAmelCase__ ( self , __UpperCamelCase )-> None:
UpperCAmelCase__ : int = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None )-> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None )-> List[int]:
UpperCAmelCase__ : Union[str, Any] = [self.sep_token_id]
UpperCAmelCase__ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase )-> Union[str, Any]:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
UpperCAmelCase__ : int = src_lang
UpperCAmelCase__ : Any = self(_lowercase , add_special_tokens=_lowercase , return_tensors=_lowercase , **_lowercase )
UpperCAmelCase__ : Optional[int] = self.convert_tokens_to_ids(_lowercase )
UpperCAmelCase__ : Optional[int] = tgt_lang_id
return inputs
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = "en_XX" , __UpperCamelCase = None , __UpperCamelCase = "ro_RO" , **__UpperCamelCase , )-> BatchEncoding:
UpperCAmelCase__ : Optional[int] = src_lang
UpperCAmelCase__ : Union[str, Any] = tgt_lang
return super().prepare_seqaseq_batch(_lowercase , _lowercase , **_lowercase )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCAmelCase__ ( self )-> int:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> None:
UpperCAmelCase__ : str = self.convert_tokens_to_ids(_lowercase )
UpperCAmelCase__ : Any = []
UpperCAmelCase__ : Optional[int] = [self.eos_token_id, self.cur_lang_code]
UpperCAmelCase__ : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCAmelCase__ : Optional[int] = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCAmelCase__ : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> None:
UpperCAmelCase__ : Union[str, Any] = self.convert_tokens_to_ids(_lowercase )
UpperCAmelCase__ : List[str] = []
UpperCAmelCase__ : Optional[int] = [self.eos_token_id, self.cur_lang_code]
UpperCAmelCase__ : Any = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCAmelCase__ : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCAmelCase__ : Union[str, Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None )-> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(_lowercase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory." )
return
UpperCAmelCase__ : Tuple = os.path.join(
_lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
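# Usage sketch, assuming the fast tokenizer class above is exported as
# MBartTokenizerFast (the checkpoint name comes from the pretrained map above):
#   tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro")
#   tok.src_lang = "en_XX"
#   batch = tok("Hello world", return_tensors="pt")  # input_ids end with </s> en_XX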
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
A__ : Dict = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
if isinstance(lowerCAmelCase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(lowerCAmelCase , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(lowerCAmelCase ):
return [[videos]]
raise ValueError(F"Could not make batched video from {videos}" )
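# e.g. a single image becomes [[image]] (one video with one frame), a list of
# frames becomes [frames] (one video), and a batch of videos is returned unchanged.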
class _lowercase ( BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ["pixel_values"]
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 2_55 , offset = True , do_normalize = True , image_mean = None , image_std = None , **kwargs , )-> None:
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 2_56}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image , size , resample = PILImageResampling.BILINEAR , data_format = None , **kwargs , )-> np.ndarray:
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image , size["shortest_edge"] , default_to_square=False )
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image , size , data_format = None , **kwargs , )-> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
    def rescale( self , image , scale , offset = True , data_format = None , **kwargs , )-> np.ndarray:
        image = image.astype(np.float32 )
        if offset:
            image = image - (scale / 2)
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , )-> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def _preprocess_image( self , image , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , offset = None , do_normalize = None , image_mean = None , image_std = None , data_format = ChannelDimension.FIRST , )-> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True." )
        # All transformations expect numpy arrays.
        image = to_numpy_array(image )
        if do_resize:
            image = self.resize(image=image , size=size , resample=resample )
        if do_center_crop:
            image = self.center_crop(image , size=crop_size )
        if do_rescale:
            image = self.rescale(image=image , scale=rescale_factor , offset=offset )
        if do_normalize:
            image = self.normalize(image=image , mean=image_mean , std=image_std )
        image = to_channel_dimension_format(image , data_format )
        return image
    def preprocess( self , videos , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , offset = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , )-> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        if not valid_images(videos ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        videos = make_batched(videos )
        videos = [
            [
                self._preprocess_image(
                    image=img , do_resize=do_resize , size=size , resample=resample , do_center_crop=do_center_crop , crop_size=crop_size , do_rescale=do_rescale , rescale_factor=rescale_factor , offset=offset , do_normalize=do_normalize , image_mean=image_mean , image_std=image_std , data_format=data_format , )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data , tensor_type=return_tensors )
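# Hedged usage sketch (added for illustration): builds the processor defined
# above with its defaults and runs a small random video through it. The frame
# shape and frame count are arbitrary choices, not values from the original
# file.
def _demo_video_preprocess():
    processor = _lowercase()
    video = [np.random.randint(0 , 2_56 , size=(3, 2_60, 2_60) , dtype=np.uint8 ) for _ in range(4 )]
    batch = processor.preprocess(video , return_tensors="np" )
    # one video, four frames, 3 channels, centre-cropped to 224x224
    print(batch["pixel_values"].shape ) # expected: (1, 4, 3, 224, 224)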
| 660 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ):
        return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor( self , **kwargs ):
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        image_inputs = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default( self ):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        processor.save_pretrained(self.tmpdirname )
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )
    def test_save_load_pretrained_additional_features( self ):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )
    def test_image_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="np" )
        input_processor = processor(images=image_input , return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_tokenizer( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
        # test if it raises when no input is passed
        with self.assertRaises(ValueError ):
            processor()
    def test_tokenizer_decode( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def test_model_input_names( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 708 |
"""simple docstring"""
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
    if not isinstance(lowerCAmelCase , int ):
        raise TypeError("Input value must be a 'int' type" )
    if lowerCAmelCase < 0:
        raise ValueError("Input value must be a positive integer" )
return bin(lowerCAmelCase ).count("1" )
if __name__ == "__main__":
import doctest
doctest.testmod()
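# Illustrative cross-check (added, not in the original file): the popcount
# above should agree with a simple divide-by-two loop for small inputs.
def _demo_popcount_crosscheck():
    for n in range(2_56 ):
        count, m = 0, n
        while m:
            count += m & 1
            m >>= 1
        assert a__(n ) == count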
| 660 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
A__ : Any = logging.get_logger(__name__)
def make_batched ( videos ):
    '''simple docstring'''
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(F"Could not make batched video from {videos}" )
class _lowercase ( BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ["pixel_values"]
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 2_55 , offset = True , do_normalize = True , image_mean = None , image_std = None , **kwargs , )-> None:
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 2_56}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image , size , resample = PILImageResampling.BILINEAR , data_format = None , **kwargs , )-> np.ndarray:
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image , size["shortest_edge"] , default_to_square=False )
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image , size , data_format = None , **kwargs , )-> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
    def rescale( self , image , scale , offset = True , data_format = None , **kwargs , )-> np.ndarray:
        image = image.astype(np.float32 )
        if offset:
            image = image - (scale / 2)
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , )-> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def _preprocess_image( self , image , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , offset = None , do_normalize = None , image_mean = None , image_std = None , data_format = ChannelDimension.FIRST , )-> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True." )
        # All transformations expect numpy arrays.
        image = to_numpy_array(image )
        if do_resize:
            image = self.resize(image=image , size=size , resample=resample )
        if do_center_crop:
            image = self.center_crop(image , size=crop_size )
        if do_rescale:
            image = self.rescale(image=image , scale=rescale_factor , offset=offset )
        if do_normalize:
            image = self.normalize(image=image , mean=image_mean , std=image_std )
        image = to_channel_dimension_format(image , data_format )
        return image
    def preprocess( self , videos , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , offset = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , )-> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        if not valid_images(videos ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        videos = make_batched(videos )
        videos = [
            [
                self._preprocess_image(
                    image=img , do_resize=do_resize , size=size , resample=resample , do_center_crop=do_center_crop , crop_size=crop_size , do_rescale=do_rescale , rescale_factor=rescale_factor , offset=offset , do_normalize=do_normalize , image_mean=image_mean , image_std=image_std , data_format=data_format , )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 709 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations ( do_eager_mode : bool , use_xla : bool ):
    '''simple docstring'''
    def run_func(func ):
        @wraps(func )
        def run_in_eager_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        @wraps(func )
        @tf.function(experimental_compile=use_xla )
        def run_in_graph_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
            return run_in_eager_mode
        else:
            return run_in_graph_mode
    return run_func
def random_input_ids ( batch_size : int , sequence_length : int , vocab_size : int ):
    '''simple docstring'''
    rng = random.Random()
    values = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(values , shape=(batch_size, sequence_length) , dtype=tf.int32 )
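# Illustrative note (added): `random_input_ids` just fakes a tokenized batch.
# The sketch below, assuming TensorFlow is installed, shows the shape/dtype
# contract the benchmark relies on; vocab size 30522 is an arbitrary
# BERT-like choice, not a value from this file.
def _demo_random_input_ids():
    ids = random_input_ids(batch_size=2 , sequence_length=8 , vocab_size=3_05_22 )
    assert ids.shape == (2, 8)
    assert ids.dtype == tf.int32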
class _lowercase ( Benchmark ):
    '''simple docstring'''
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"
@property
    def framework_version( self ):
        return tf.__version__
    def _inference_speed( self , model_name , batch_size , sequence_length )-> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow." )
        _inference = self._prepare_inference_func(model_name , batch_size , sequence_length )
        return self._measure_speed(_inference )
    def _train_speed( self , model_name , batch_size , sequence_length )-> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow." )
        _train = self._prepare_train_func(model_name , batch_size , sequence_length )
        return self._measure_speed(_train )
    def _inference_memory( self , model_name , batch_size , sequence_length )-> [Memory, Optional[MemorySummary]]:
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , True )
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow." )
        _inference = self._prepare_inference_func(model_name , batch_size , sequence_length )
        return self._measure_memory(_inference )
    def _train_memory( self , model_name , batch_size , sequence_length )-> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , True )
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow." )
        _train = self._prepare_train_func(model_name , batch_size , sequence_length )
        return self._measure_memory(_train )
    def _prepare_inference_func( self , model_name , batch_size , sequence_length )-> Callable[[], None]:
        config = self.config_dict[model_name]
        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported." )
        has_model_class_in_config = (
            hasattr(config , "architectures" )
            and isinstance(config.architectures , list )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers" , fromlist=[model_class] )
                model_cls = getattr(transformers_module , model_class )
                model = model_cls(config )
            except ImportError:
                raise ImportError(
                    F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config )
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config , "vocab_size" ) else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size , sequence_length , vocab_size )
        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_decoder_forward():
            return model(input_ids , decoder_input_ids=input_ids , training=False )
        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_forward():
            return model(input_ids , training=False )
        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func( self , model_name , batch_size , sequence_length )-> Callable[[], None]:
        config = self.config_dict[model_name]
        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported." )
        has_model_class_in_config = (
            hasattr(config , "architectures" )
            and isinstance(config.architectures , list )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers" , fromlist=[model_class] )
                model_cls = getattr(transformers_module , model_class )
                model = model_cls(config )
            except ImportError:
                raise ImportError(
                    F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config )
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config , "vocab_size" ) else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size , sequence_length , vocab_size )
        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_decoder_train():
            loss = model(input_ids , decoder_input_ids=input_ids , labels=input_ids , training=True )[0]
            gradients = tf.gradients(loss , model.trainable_variables )
            return gradients
        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_train():
            loss = model(input_ids , labels=input_ids , training=True )[0]
            gradients = tf.gradients(loss , model.trainable_variables )
            return gradients
        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed( self , func )-> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
                    timeit.repeat(func , repeat=1 , number=5 )
                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func , repeat=self.args.repeat , number=10 , )
                return min(runtimes ) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(F"Doesn't fit on GPU. {e}" )
    def _measure_memory( self , func )-> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used." )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line." )
                    trace = start_memory_tracing("transformers" )
                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`" )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU." )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU." )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle )
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use )
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow." )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func )
                        memory = Memory(memory_bytes ) if isinstance(memory_bytes , int ) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace )
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None
                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(F"Doesn't fit on GPU. {e}" )
                return "N/A", None
| 660 | 0 |
"""simple docstring"""
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness ( check_program , timeout , task_id , completion_id ):
    '''simple docstring'''
    manager = multiprocessing.Manager()
    result = manager.list()
    p = multiprocessing.Process(target=unsafe_execute , args=(check_program, result, timeout) )
    p.start()
    p.join(timeout=timeout + 1 )
    if p.is_alive():
        p.kill()
    if not result:
        result.append("timed out" )
    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute ( check_program , result , timeout ):
    '''simple docstring'''
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil
        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir
        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()
        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout ):
                    exec(check_program , exec_globals )
            result.append("passed" )
        except TimeoutException:
            result.append("timed out" )
        except BaseException as e:
            result.append(F"failed: {e}" )
        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
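# Hedged usage sketch (added): `check_correctness` runs an untrusted program
# in a subprocess with a timeout; the program below is a trivially passing
# assert, and the task/completion ids are made-up example values.
def _demo_check_correctness():
    outcome = check_correctness("assert 1 + 1 == 2" , timeout=3.0 , task_id="demo/0" , completion_id=0 )
    print(outcome["passed"] , outcome["result"] )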
@contextlib.contextmanager
def time_limit ( seconds ):
    '''simple docstring'''
    def signal_handler(signum , frame ):
        raise TimeoutException("Timed out!" )
    signal.setitimer(signal.ITIMER_REAL , seconds )
    signal.signal(signal.SIGALRM , signal_handler )
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL , 0 )
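# Hedged usage sketch (added): `time_limit` relies on SIGALRM, so it only
# works on Unix main threads. A busy loop should be cut off and surface as
# TimeoutException.
def _demo_time_limit():
    try:
        with time_limit(0.1 ):
            while True: # would never finish on its own
                pass
    except TimeoutException:
        print("interrupted after ~0.1s, as expected" )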
@contextlib.contextmanager
def swallow_io ():
    '''simple docstring'''
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream ):
        with contextlib.redirect_stderr(stream ):
            with redirect_stdin(stream ):
                yield
@contextlib.contextmanager
def create_tempdir ():
    '''simple docstring'''
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname ):
            yield dirname
class TimeoutException ( Exception ):
    '''simple docstring'''
    pass
class WriteOnlyStringIO ( io.StringIO ):
    '''simple docstring'''
    def read( self , *args , **kwargs ):
        raise OSError
    def readline( self , *args , **kwargs ):
        raise OSError
    def readlines( self , *args , **kwargs ):
        raise OSError
    def readable( self , *args , **kwargs ):
        '''Returns True if the IO object can be read.'''
        return False
class redirect_stdin ( contextlib._RedirectStream ): # type: ignore
    _stream = "stdin"
@contextlib.contextmanager
def chdir ( root ):
    '''simple docstring'''
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root )
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd )
def reliability_guard ( maximum_memory_bytes=None ):
'''simple docstring'''
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
    builtins.exit = None
    builtins.quit = None
import os
    os.environ["OMP_NUM_THREADS"] = "1"
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None
import shutil
    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None
import subprocess
    subprocess.Popen = None # type: ignore
    __builtins__["help"] = None
import sys
    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
| 710 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 660 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Dict = logging.get_logger(__name__)
class _lowercase ( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'timm_backbone'
    def __init__( self , backbone=None , num_channels=3 , features_only=True , use_pretrained_backbone=True , out_indices=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
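# Minimal instantiation sketch (added for illustration): "resnet50" is an
# arbitrary timm model name used as an example, not a default from this file.
def _demo_timm_backbone_config():
    config = _lowercase(backbone="resnet50" , out_indices=(1, 2, 3, 4) )
    print(config.backbone , config.out_indices )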
| 711 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _lowercase ( ModelMixin , ConfigMixin ):
    '''simple docstring'''
    @register_to_config
    def __init__( self , embedding_dim = 7_68 , ):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1 , embedding_dim ) )
        self.std = nn.Parameter(torch.ones(1 , embedding_dim ) )
    def to( self , torch_device = None , torch_dtype = None , ):
        self.mean = nn.Parameter(self.mean.to(torch_device ).to(torch_dtype ) )
        self.std = nn.Parameter(self.std.to(torch_device ).to(torch_dtype ) )
        return self
    def scale( self , embeds ):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds
    def unscale( self , embeds ):
        embeds = (embeds * self.std) + self.mean
        return embeds
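# Round-trip sketch (added for illustration): with the freshly initialised
# zero mean and unit std, `unscale(scale(x))` should reproduce `x` exactly.
# The embedding dimension 8 is an arbitrary small example value.
def _demo_normalizer_roundtrip():
    normalizer = _lowercase(embedding_dim=8 )
    embeds = torch.randn(2 , 8 )
    assert torch.allclose(normalizer.unscale(normalizer.scale(embeds ) ) , embeds )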
| 660 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict ( TypedDict ):
    bwt_string: str
    idx_original_string: int
def all_rotations ( s : str ):
    '''simple docstring'''
    if not isinstance(s , str ):
        raise TypeError("The parameter s type must be str." )
    return [s[i:] + s[:i] for i in range(len(s ) )]
def bwt_transform ( s : str ):
    '''simple docstring'''
    if not isinstance(s , str ):
        raise TypeError("The parameter s type must be str." )
    if not s:
        raise ValueError("The parameter s must not be empty." )
    rotations = all_rotations(s )
    rotations.sort() # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations] ),
        "idx_original_string": rotations.index(s ),
    }
    return response
def reverse_bwt ( bwt_string : str , idx_original_string : int ):
    '''simple docstring'''
    if not isinstance(bwt_string , str ):
        raise TypeError("The parameter bwt_string type must be str." )
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty." )
    try:
        idx_original_string = int(idx_original_string )
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or castable"
            " to int." )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0." )
    if idx_original_string >= len(bwt_string ):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string)." )
    ordered_rotations = [""] * len(bwt_string )
    for _ in range(len(bwt_string ) ):
        for i in range(len(bwt_string ) ):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
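# Illustrative round-trip check (added): the BWT followed by its inverse
# should reproduce the input; "^BANANA" is the textbook example string.
def _demo_bwt_roundtrip():
    result = bwt_transform("^BANANA" )
    restored = reverse_bwt(result["bwt_string"] , result["idx_original_string"] )
    assert restored == "^BANANA"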
if __name__ == "__main__":
    entry_msg = """Provide a string that I will generate its BWT transform: """
    s = input(entry_msg).strip()
    result = bwt_transform(s)
print(
f"""Burrows Wheeler transform for string \'{s}\' results """
f"""in \'{result["bwt_string"]}\'"""
)
    original_string = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
print(
f"""Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' """
f"""we get original string \'{original_string}\'"""
)
| 712 |
"""simple docstring"""
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch ( gpt2_checkpoint_path : str , gpt2_config_file : str , pytorch_dump_folder_path : str ):
    '''simple docstring'''
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file )
    model = GPT2Model(config )
    # Load weights from numpy
    load_tf_weights_in_gpt2(model , config , gpt2_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(F"Save PyTorch model to {pytorch_weights_dump_path}" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F"Save configuration file to {pytorch_config_dump_path}" )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
A__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 660 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer
MT5Tokenizer = T5Tokenizer
if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast
MT5TokenizerFast = T5TokenizerFast
_import_structure = {"""configuration_mt5""": ["""MT5Config""", """MT5OnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mt5"""] = [
"""MT5EncoderModel""",
"""MT5ForConditionalGeneration""",
"""MT5ForQuestionAnswering""",
"""MT5Model""",
"""MT5PreTrainedModel""",
"""MT5Stack""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_mt5"""] = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_mt5"""] = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""]
if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
import sys
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["""__file__"""],
        _import_structure,
        extra_objects={"""MT5Tokenizer""": MT5Tokenizer, """MT5TokenizerFast""": MT5TokenizerFast},
        module_spec=__spec__,
    )
| 713 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["""small""", """medium""", """large"""]
OLD_KEY = """lm_head.decoder.weight"""
NEW_KEY = """lm_head.weight"""
def convert_dialogpt_checkpoint ( checkpoint_path : str , pytorch_dump_folder_path : str ):
    '''simple docstring'''
    d = torch.load(checkpoint_path )
    d[NEW_KEY] = d.pop(OLD_KEY )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    torch.save(d , os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME ) )
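# Toy sketch of the key rename (added for illustration): the conversion boils
# down to moving one tensor to a new key, exercised here on a fabricated
# two-entry state dict without any real checkpoint.
def _demo_key_rename():
    d = {OLD_KEY: torch.zeros(2 ), "other.weight": torch.ones(2 )}
    d[NEW_KEY] = d.pop(OLD_KEY )
    assert NEW_KEY in d and OLD_KEY not in d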
if __name__ == "__main__":
A__ : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
A__ : Tuple = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
A__ : Tuple = os.path.join(args.dialogpt_path, f"""{MODEL}_ft.pkl""")
A__ : str = f"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 660 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
@property
    def dummy_image( self ):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
        return image
@property
    def dummy_cond_unet_upscale( self ):
        torch.manual_seed(0 )
        model = UNetaDConditionModel(
            block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=True , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , )
        return model
@property
    def dummy_vae( self ):
        torch.manual_seed(0 )
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        return model
@property
    def dummy_text_encoder( self ):
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , )
        return CLIPTextModel(config )
    def test_stable_diffusion_upscale( self ):
        device = "cpu" # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction" )
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        image = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        low_res_image = Image.fromarray(np.uint8(image ) ).convert("RGB" ).resize((64, 64) )
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet , low_res_scheduler=low_res_scheduler , scheduler=scheduler , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , max_noise_level=3_50 , )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = sd_pipe(
            [prompt] , image=low_res_image , generator=generator , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = sd_pipe(
            [prompt] , image=low_res_image , generator=generator , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_upscale_batch( self ):
        device = "cpu" # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction" )
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        image = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        low_res_image = Image.fromarray(np.uint8(image ) ).convert("RGB" ).resize((64, 64) )
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet , low_res_scheduler=low_res_scheduler , scheduler=scheduler , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , max_noise_level=3_50 , )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
        image = output.images
        assert image.shape[0] == 2
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = sd_pipe(
            [prompt] , image=low_res_image , generator=generator , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
        image = output.images
        assert image.shape[0] == 2
    @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def test_stable_diffusion_upscale_fp16( self ):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction" )
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        image = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        low_res_image = Image.fromarray(np.uint8(image ) ).convert("RGB" ).resize((64, 64) )
        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet , low_res_scheduler=low_res_scheduler , scheduler=scheduler , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , max_noise_level=3_50 , )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0 )
        image = sd_pipe(
            [prompt] , image=low_res_image , generator=generator , num_inference_steps=2 , output_type="np" , ).images
        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_upscale_pipeline( self ):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy" )
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=image , generator=generator , output_type="np" , )
        image = output.images[0]
        assert image.shape == (5_12, 5_12, 3)
        assert np.abs(expected_image - image ).max() < 1E-3
    def test_stable_diffusion_upscale_pipeline_fp16( self ):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy" )
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id , torch_dtype=torch.float16 , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=image , generator=generator , output_type="np" , )
        image = output.images[0]
        assert image.shape == (5_12, 5_12, 3)
        assert np.abs(expected_image - image ).max() < 5E-1
def lowerCAmelCase__ ( self )-> Optional[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase__ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
UpperCAmelCase__ : Optional[Any] = "stabilityai/stable-diffusion-x4-upscaler"
UpperCAmelCase__ : Optional[Any] = StableDiffusionUpscalePipeline.from_pretrained(
_UpperCAmelCase , torch_dtype=torch.floataa , )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase__ : int = "a cat sitting on a park bench"
UpperCAmelCase__ : Any = torch.manual_seed(0 )
UpperCAmelCase__ : Dict = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=5 , output_type="np" , )
UpperCAmelCase__ : Any = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 714 |
"""simple docstring"""
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    '''simple docstring'''
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            # mark every multiple of i, starting at i**2, as composite
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    '''simple docstring'''
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 660 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase__ ( self )-> int:
UpperCAmelCase__ : Tuple = XLMRobertaModel.from_pretrained("xlm-roberta-base" )
UpperCAmelCase__ : Optional[int] = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
UpperCAmelCase__ : Optional[Any] = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase__ : str = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase__ : List[str] = model(_UpperCamelCase )["last_hidden_state"].detach()
self.assertEqual(output.shape , _UpperCamelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _UpperCamelCase , atol=1E-3 ) )
@slow
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ : List[Any] = XLMRobertaModel.from_pretrained("xlm-roberta-large" )
UpperCAmelCase__ : Optional[Any] = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
UpperCAmelCase__ : str = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase__ : Union[str, Any] = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase__ : int = model(_UpperCamelCase )["last_hidden_state"].detach()
self.assertEqual(output.shape , _UpperCamelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _UpperCamelCase , atol=1E-3 ) )
| 715 |
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    '''simple docstring'''
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]
    return out_tensor.tolist()


def is_punctuation(char):
    '''simple docstring'''
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    '''simple docstring'''
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt" if labels is None else None, )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]
        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
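# Hedged usage sketch (added; not part of the original example). The checkpoint
# name is an assumption -- any LUKE tokenizer with entity support would do --
# and `features` must carry the keys used above.
# from transformers import AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-base")
# collator = DataCollatorForLukeTokenClassification(tokenizer=tokenizer, padding="longest")
# batch = collator(features)  # features: list of dicts with "entity_ids", "ner_tags", "original_entity_spans", "labels"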
| 660 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        'callback',
        'latents',
        'callback_steps',
        'output_type',
        'num_images_per_prompt',
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
_A = False
_A = False
def lowerCAmelCase__ ( self )-> Optional[Any]:
torch.manual_seed(0 )
UpperCAmelCase__ : List[Any] = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=5_12 , sample_rate=1_60_00 , in_channels=2 , out_channels=2 , flip_sin_to_cos=_A , use_timestep_embedding=_A , time_embedding_type="fourier" , mid_block_type="UNetMidBlock1D" , down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , )
UpperCAmelCase__ : int = IPNDMScheduler()
UpperCAmelCase__ : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
}
return components
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=0 )-> Optional[Any]:
if str(_A ).startswith("mps" ):
UpperCAmelCase__ : str = torch.manual_seed(_A )
else:
UpperCAmelCase__ : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase__ : int = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ : int = self.get_dummy_components()
UpperCAmelCase__ : Optional[Any] = DanceDiffusionPipeline(**_A )
UpperCAmelCase__ : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase__ : Union[str, Any] = self.get_dummy_inputs(_A )
UpperCAmelCase__ : List[str] = pipe(**_A )
UpperCAmelCase__ : List[Any] = output.audios
UpperCAmelCase__ : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
UpperCAmelCase__ : Optional[Any] = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def lowerCAmelCase__ ( self )-> str:
return super().test_save_load_local()
@skip_mps
def lowerCAmelCase__ ( self )-> str:
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def lowerCAmelCase__ ( self )-> Any:
return super().test_save_load_optional_components()
@skip_mps
def lowerCAmelCase__ ( self )-> List[Any]:
return super().test_attention_slicing_forward_pass()
def lowerCAmelCase__ ( self )-> Dict:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> List[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ : List[Any] = torch_device
UpperCAmelCase__ : int = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" )
UpperCAmelCase__ : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase__ : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase__ : str = pipe(generator=_A , num_inference_steps=1_00 , audio_length_in_s=4.096 )
UpperCAmelCase__ : str = output.audios
UpperCAmelCase__ : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
UpperCAmelCase__ : Union[str, Any] = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : str = torch_device
UpperCAmelCase__ : Tuple = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" , torch_dtype=torch.floataa )
UpperCAmelCase__ : Optional[int] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase__ : Union[str, Any] = torch.manual_seed(0 )
UpperCAmelCase__ : Optional[int] = pipe(generator=_A , num_inference_steps=1_00 , audio_length_in_s=4.096 )
UpperCAmelCase__ : Union[str, Any] = output.audios
UpperCAmelCase__ : int = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
UpperCAmelCase__ : List[str] = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
| 716 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    '''simple docstring'''
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta
    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    '''simple docstring'''
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data
        dummy_data.append((i, example))
    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    '''simple docstring'''
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)
    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
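# Hedged usage sketch (added): time a small write with the helpers above.
# The feature spec and file name are illustrative.
# import os, tempfile
# features = datasets.Features({"text": datasets.Value("string"), "score": datasets.Value("float32")})
# with tempfile.TemporaryDirectory() as tmp_dir:
#     dataset = generate_example_dataset(os.path.join(tmp_dir, "bench.arrow"), features, num_examples=10)
#     print(len(dataset))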
| 660 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self )-> List[str]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase__ ( self )-> int:
UpperCAmelCase__ : Optional[Any] = ort.SessionOptions()
UpperCAmelCase__ : int = False
return options
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase__ : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase__ : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy" )
# using the PNDM scheduler by default
UpperCAmelCase__ : Union[str, Any] = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=__lowerCamelCase , feature_extractor=__lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = "A red cat sitting on a park bench"
UpperCAmelCase__ : Optional[Any] = np.random.RandomState(0 )
UpperCAmelCase__ : List[str] = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , mask_image=__lowerCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=__lowerCamelCase , output_type="np" , )
UpperCAmelCase__ : Tuple = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1E-2
| 717 |
"""simple docstring"""
from manim import *
class _lowercase ( Scene ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : str = Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase__ : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Union[str, Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : List[Any] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : int = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = VGroup(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("CPU" , font_size=24 )
UpperCAmelCase__ : Any = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(4 )]
UpperCAmelCase__ : List[str] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Union[str, Any] = Text("GPU" , font_size=24 )
UpperCAmelCase__ : Dict = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : List[str] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("Model" , font_size=24 )
UpperCAmelCase__ : Dict = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : List[str] = []
for i, rect in enumerate(__UpperCamelCase ):
rect.set_stroke(__UpperCamelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
UpperCAmelCase__ : int = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCamelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCamelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__UpperCamelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__UpperCamelCase , buff=0.0 )
self.add(__UpperCamelCase )
cpu_targs.append(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Any = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("Loaded Checkpoint" , font_size=24 )
UpperCAmelCase__ : Any = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , aligned_edge=__UpperCamelCase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
UpperCAmelCase__ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase__ : Any = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : str = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(__UpperCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
UpperCAmelCase__ : Optional[Any] = MarkupText(
F"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCamelCase ) , Write(__UpperCamelCase ) )
self.play(Write(__UpperCamelCase , run_time=1 ) , Create(__UpperCamelCase , run_time=1 ) )
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : List[str] = []
for i, rect in enumerate(__UpperCamelCase ):
UpperCAmelCase__ : Optional[Any] = fill.copy().set_fill(__UpperCamelCase , opacity=0.7 )
target.move_to(__UpperCamelCase )
first_animations.append(GrowFromCenter(__UpperCamelCase , run_time=1 ) )
UpperCAmelCase__ : List[str] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__UpperCamelCase , run_time=1.5 ) )
self.play(*__UpperCamelCase )
self.play(*__UpperCamelCase )
self.wait()
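# Added note: a manim Scene like the one above is rendered from the CLI,
# e.g. (file name illustrative):
#   manim -pql stage.py _lowercase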
| 660 | 0 |
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
A__ : Dict = logging.get_logger(__name__)
A__ : Any = {
"t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class TaConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'" )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs, )


class TaOnnxConfig(OnnxSeqaSeqConfigWithPast):
    '''simple docstring'''
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
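# Hedged usage sketch (added): inside the transformers package this module
# mirrors, the two classes above would be exercised roughly like this.
# config = TaConfig(d_model=256, num_layers=2, num_heads=4)
# onnx_config = TaOnnxConfig.from_model_config(config)
# print(onnx_config.inputs)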
| 718 |
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
A__ : Tuple = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    '''simple docstring'''
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
A__ : List[Any] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
A__ : List[Any] = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student, n_teacher):
    '''simple docstring'''
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}" )
        return list(range(n_student))


def get_layers_to_supervise(n_student, n_teacher):
    '''simple docstring'''
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
):
    '''simple docstring'''
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeqaSeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()
    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})
    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)
    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeqaSeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.
    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}" )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy
    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)
    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}" )
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
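# Added note: fire.Fire maps CLI arguments onto the function signature above,
# so a typical invocation (teacher name, path, and sizes are illustrative) is:
#   python make_student.py facebook/bart-large-cnn student_dir --e 6 --d 3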
| 660 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
A__ : str = logging.get_logger(__name__)
A__ : Dict = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class InstructBlipVisionConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs, )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
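# Hedged usage sketch (added): compose the top-level config from sub-configs
# via the classmethod above; OPTConfig is an illustrative text backbone.
# from transformers import OPTConfig
# config = InstructBlipConfig.from_vision_qformer_text_configs(
#     InstructBlipVisionConfig(), InstructBlipQFormerConfig(), OPTConfig()
# )
# print(config.num_query_tokens)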
| 719 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowercase ( OnnxPipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self )-> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : Tuple = ort.SessionOptions()
UpperCAmelCase__ : List[str] = False
return options
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase__ : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase__ : str = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : int = "A red cat sitting on a park bench"
UpperCAmelCase__ : Tuple = np.random.RandomState(0 )
UpperCAmelCase__ : Any = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type="np" , )
UpperCAmelCase__ : Tuple = output.images
UpperCAmelCase__ : Dict = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase__ : Union[str, Any] = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase__ : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" )
UpperCAmelCase__ : Optional[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : int = "A red cat sitting on a park bench"
UpperCAmelCase__ : List[str] = np.random.RandomState(0 )
UpperCAmelCase__ : str = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCamelCase , output_type="np" , )
UpperCAmelCase__ : List[str] = output.images
UpperCAmelCase__ : List[Any] = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase__ : int = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 660 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        # attach the smaller component to the larger one
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    '''simple docstring'''


if __name__ == "__main__":
    import doctest
    doctest.testmod()
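# Added usage example (a sketch; the edge list and expected weight come from
# the classic textbook graph, not from the original file):
def example_run() -> None:
    g = Graph(4)
    for u_node, v_node, weight in [(0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4)]:
        g.add_edge(u_node, v_node, weight)
    g.boruvka()  # the printed total MST weight should be 19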
| 720 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A__ : Union[str, Any] = logging.get_logger(__name__)
A__ : Optional[int] = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class TableTransformerConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
def __init__( self , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=3 , __UpperCamelCase=1_00 , __UpperCamelCase=6 , __UpperCamelCase=20_48 , __UpperCamelCase=8 , __UpperCamelCase=6 , __UpperCamelCase=20_48 , __UpperCamelCase=8 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=True , __UpperCamelCase="relu" , __UpperCamelCase=2_56 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.02 , __UpperCamelCase=1.0 , __UpperCamelCase=False , __UpperCamelCase="sine" , __UpperCamelCase="resnet50" , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=0.1 , **__UpperCamelCase , )-> List[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCAmelCase__ : Any = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : int = backbone_config.get("model_type" )
UpperCAmelCase__ : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase__ : int = config_class.from_dict(__UpperCamelCase )
# set timm attributes to None
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = None, None, None
UpperCAmelCase__ : Optional[int] = use_timm_backbone
UpperCAmelCase__ : Dict = backbone_config
UpperCAmelCase__ : List[Any] = num_channels
UpperCAmelCase__ : Any = num_queries
UpperCAmelCase__ : int = d_model
UpperCAmelCase__ : Optional[int] = encoder_ffn_dim
UpperCAmelCase__ : str = encoder_layers
UpperCAmelCase__ : Dict = encoder_attention_heads
UpperCAmelCase__ : Optional[Any] = decoder_ffn_dim
UpperCAmelCase__ : Tuple = decoder_layers
UpperCAmelCase__ : Optional[Any] = decoder_attention_heads
UpperCAmelCase__ : List[str] = dropout
UpperCAmelCase__ : Tuple = attention_dropout
UpperCAmelCase__ : List[Any] = activation_dropout
UpperCAmelCase__ : Dict = activation_function
UpperCAmelCase__ : Optional[Any] = init_std
UpperCAmelCase__ : List[str] = init_xavier_std
UpperCAmelCase__ : int = encoder_layerdrop
UpperCAmelCase__ : Tuple = decoder_layerdrop
UpperCAmelCase__ : int = encoder_layers
UpperCAmelCase__ : Dict = auxiliary_loss
UpperCAmelCase__ : Union[str, Any] = position_embedding_type
UpperCAmelCase__ : List[str] = backbone
UpperCAmelCase__ : List[Any] = use_pretrained_backbone
UpperCAmelCase__ : List[str] = dilation
# Hungarian matcher
UpperCAmelCase__ : Dict = class_cost
UpperCAmelCase__ : Any = bbox_cost
UpperCAmelCase__ : Tuple = giou_cost
# Loss coefficients
UpperCAmelCase__ : Any = mask_loss_coefficient
UpperCAmelCase__ : Dict = dice_loss_coefficient
UpperCAmelCase__ : Any = bbox_loss_coefficient
UpperCAmelCase__ : Tuple = giou_loss_coefficient
UpperCAmelCase__ : List[Any] = eos_coefficient
super().__init__(is_encoder_decoder=__UpperCamelCase , **__UpperCamelCase )
@property
    def num_attention_heads(self) -> int:
return self.encoder_attention_heads
@property
    def hidden_size(self) -> int:
return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("1.11")
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
    def atol_for_validation(self) -> float:
return 1E-5
@property
    def default_onnx_opset(self) -> int:
return 12
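# Hedged usage sketch (added): pair the model config with its ONNX descriptor
# (commented because this module uses package-relative imports).
# config = TableTransformerConfig()
# onnx_config = TableTransformerOnnxConfig(config)
# print(onnx_config.inputs, onnx_config.default_onnx_opset)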
| 660 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
A__ : Union[str, Any] = logging.get_logger(__name__)
A__ : Optional[Any] = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
A__ : Optional[int] = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    '''simple docstring'''
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    '''simple docstring'''
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    '''simple docstring'''
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """Copy/paste/tweak the fairseq model's weights into the transformers design."""
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)

    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False

    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-large-lv60",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/s2t-small-mustc-en-fr-st",
        type=str,
        help="Path to hf decoder s2t checkpoint config",
    )
    parser.add_argument("--vocab_size", default=10_224, type=int, help="Vocab size of decoder")
    parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")

    args = parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
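
# Illustrative invocation (the script name and the checkpoint/dict paths below
# are placeholders, not files shipped with this repo):
#
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/checkpoint_best.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-2-speech2text2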
| 721 |
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
    calculate_bleu,
    calculate_rouge,
    chunks,
    lmap,
    load_json,
    parse_numeric_n_bool_cl_kwargs,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


logger = getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
) -> Dict:
    """Run evaluation on part of the data for one gpu and save to {save_dir}/rank_{rank}_output.json"""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name",
        type=str,
        help="like facebook/bart-large-cnn,t5-base, etc.",
        default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout",
        type=int,
        default=600,
        required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics: Dict = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
    else:
        shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> List:
    """Concatenate the partial results from each node, then sort them by example id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
# Unreachable
if __name__ == "__main__":
# Usage for MT:
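    # (illustrative example, not from the original file)
    # python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
    #     --model_name Helsinki-NLP/opus-mt-en-ro --data_dir wmt_en_ro \
    #     --save_dir translations --task translation --bs 16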
run_generate()
| 660 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
    PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 700 |
"""simple docstring"""
from timeit import timeit
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if number < 0:
raise ValueError("the value of input must not be negative" )
UpperCAmelCase__ : Tuple = 0
while number:
number &= number - 1
result += 1
return result
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if number < 0:
raise ValueError("the value of input must not be negative" )
UpperCAmelCase__ : Union[str, Any] = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def a__ ( ):
'''simple docstring'''
def do_benchmark(lowerCAmelCase : int ) -> None:
UpperCAmelCase__ : Dict = "import __main__ as z"
print(F"Benchmark when {number = }:" )
print(F"{get_set_bits_count_using_modulo_operator(lowerCAmelCase ) = }" )
UpperCAmelCase__ : Tuple = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=lowerCAmelCase )
print(F"timeit() runs in {timing} seconds" )
print(F"{get_set_bits_count_using_brian_kernighans_algorithm(lowerCAmelCase ) = }" )
UpperCAmelCase__ : Any = timeit(
"z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=lowerCAmelCase , )
print(F"timeit() runs in {timing} seconds" )
for number in (25, 37, 58, 0):
do_benchmark(lowerCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 660 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 701 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 660 | 0 |
"""simple docstring"""
A__ : Dict = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
A__ : Optional[int] = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def a__ ( lowerCAmelCase : dict[int, list[int]] , lowerCAmelCase : int , lowerCAmelCase : list[bool] ):
'''simple docstring'''
UpperCAmelCase__ : Any = True
UpperCAmelCase__ : Union[str, Any] = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
order.append(lowerCAmelCase )
return order
def a__ ( lowerCAmelCase : dict[int, list[int]] , lowerCAmelCase : int , lowerCAmelCase : list[bool] ):
'''simple docstring'''
UpperCAmelCase__ : str = True
UpperCAmelCase__ : List[Any] = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return component
def a__ ( lowerCAmelCase : dict[int, list[int]] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = len(lowerCAmelCase ) * [False]
UpperCAmelCase__ : Tuple = {vert: [] for vert in range(len(lowerCAmelCase ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(lowerCAmelCase )
UpperCAmelCase__ : List[str] = []
for i, was_visited in enumerate(lowerCAmelCase ):
if not was_visited:
order += topology_sort(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : int = []
UpperCAmelCase__ : str = len(lowerCAmelCase ) * [False]
for i in range(len(lowerCAmelCase ) ):
UpperCAmelCase__ : int = order[len(lowerCAmelCase ) - i - 1]
if not visited[vert]:
UpperCAmelCase__ : Union[str, Any] = find_components(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
components_list.append(lowerCAmelCase )
return components_list
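

# Illustrative driver (not part of the original module): prints the strongly
# connected components of the two sample graphs defined above.
if __name__ == "__main__":
    print(strongly_connected_components(test_graph_1))  # [[0, 1, 2], [3], [4]]
    print(strongly_connected_components(test_graph_2))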
| 702 |
"""simple docstring"""
def a__ ( lowerCAmelCase : list , lowerCAmelCase : list ):
'''simple docstring'''
_validate_point(lowerCAmelCase )
_validate_point(lowerCAmelCase )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(lowerCAmelCase , lowerCAmelCase ) ) )
def a__ ( lowerCAmelCase : list[float] ):
'''simple docstring'''
if point:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
for item in point:
if not isinstance(lowerCAmelCase , (int, float) ):
UpperCAmelCase__ : Tuple = (
"Expected a list of numbers as input, found "
F"{type(lowerCAmelCase ).__name__}"
)
raise TypeError(lowerCAmelCase )
else:
UpperCAmelCase__ : Dict = F"Expected a list of numbers as input, found {type(lowerCAmelCase ).__name__}"
raise TypeError(lowerCAmelCase )
else:
raise ValueError("Missing an input" )
def a__ ( lowerCAmelCase : list , lowerCAmelCase : list ):
'''simple docstring'''
_validate_point(lowerCAmelCase )
_validate_point(lowerCAmelCase )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(lowerCAmelCase , lowerCAmelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
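    # Illustrative checks (not in the original file):
    print(manhattan_distance([1, 1], [2, 2]))  # 2.0
    print(manhattan_distance_one_liner([1, 4], [5, 5]))  # 5.0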
| 660 | 0 |
from __future__ import annotations


def two_pointer(nums: list[int], target: int) -> list[int]:
    """Given a sorted list, return the indices of the two numbers that add up to target."""
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{two_pointer([2, 7, 11, 15], 9) = }")
| 703 |
"""simple docstring"""
import math
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a__ ( lowerCAmelCase : int = 1_0001 ):
'''simple docstring'''
try:
UpperCAmelCase__ : List[str] = int(lowerCAmelCase )
except (TypeError, ValueError):
raise TypeError("Parameter nth must be int or castable to int." ) from None
if nth <= 0:
raise ValueError("Parameter nth must be greater than or equal to one." )
UpperCAmelCase__ : list[int] = []
UpperCAmelCase__ : str = 2
while len(lowerCAmelCase ) < nth:
if is_prime(lowerCAmelCase ):
primes.append(lowerCAmelCase )
num += 1
else:
num += 1
return primes[len(lowerCAmelCase ) - 1]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 660 | 0 |
"""simple docstring"""
from math import factorial
def a__ ( lowerCAmelCase : int , lowerCAmelCase : int ):
'''simple docstring'''
if n < k or k < 0:
raise ValueError("Please enter positive integers for n and k where n >= k" )
return factorial(_lowerCamelCase ) // (factorial(_lowerCamelCase ) * factorial(n - k ))
if __name__ == "__main__":
print(
"""The number of five-card hands possible from a standard""",
f"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
"""If a class of 40 students must be arranged into groups of""",
f"""4 for group projects, there are {combinations(40, 4)} ways""",
"""to arrange them.\n""",
)
print(
"""If 10 teams are competing in a Formula One race, there""",
f"""are {combinations(10, 3)} ways that first, second and""",
"""third place can be awarded.""",
)
| 704 |
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
    from transformers.models.vit_mae.modeling_vit_mae import VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ViTMAE does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    # overwrite from common since ViTMAEForPreTraining applies random masking; fix the noise to get
    # reproducible masks during the PT/TF equivalence test
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
| 660 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
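
# Illustrative note (assuming this file is transformers/models/convnext/__init__.py):
# with _LazyModule installed in sys.modules, a statement such as
#     from transformers.models.convnext import ConvNextModel
# only triggers the import of modeling_convnext at that attribute access.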
| 705 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _lowercase :
'''simple docstring'''
_A = 42
# setable values
_A = 42
_A = 42
_A = None
@classmethod
def lowerCAmelCase__ ( cls , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
return cls(common=__UpperCamelCase , init_noise_sigma=__UpperCamelCase , timesteps=__UpperCamelCase )
@dataclass
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
_A = [e.name for e in FlaxKarrasDiffusionSchedulers]
_A = 42
@property
def lowerCAmelCase__ ( self )-> Optional[int]:
return True
@register_to_config
def __init__( self , __UpperCamelCase = 10_00 , __UpperCamelCase = 0.0001 , __UpperCamelCase = 0.02 , __UpperCamelCase = "linear" , __UpperCamelCase = None , __UpperCamelCase = "fixed_small" , __UpperCamelCase = True , __UpperCamelCase = "epsilon" , __UpperCamelCase = jnp.floataa , )-> List[str]:
UpperCAmelCase__ : int = dtype
def lowerCAmelCase__ ( self , __UpperCamelCase = None )-> DDPMSchedulerState:
if common is None:
UpperCAmelCase__ : int = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
UpperCAmelCase__ : Tuple = jnp.array(1.0 , dtype=self.dtype )
UpperCAmelCase__ : Tuple = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__UpperCamelCase , init_noise_sigma=__UpperCamelCase , timesteps=__UpperCamelCase , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None )-> jnp.ndarray:
return sample
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = () )-> DDPMSchedulerState:
UpperCAmelCase__ : Dict = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
UpperCAmelCase__ : Optional[int] = (jnp.arange(0 , __UpperCamelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__UpperCamelCase , timesteps=__UpperCamelCase , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None )-> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCAmelCase__ : Any = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
UpperCAmelCase__ : Union[str, Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
UpperCAmelCase__ : Dict = jnp.clip(__UpperCamelCase , a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
UpperCAmelCase__ : Tuple = jnp.log(jnp.clip(__UpperCamelCase , a_min=1E-20 ) )
elif variance_type == "fixed_large":
UpperCAmelCase__ : Union[str, Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
UpperCAmelCase__ : Optional[int] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
UpperCAmelCase__ : List[str] = variance
UpperCAmelCase__ : Union[str, Any] = state.common.betas[t]
UpperCAmelCase__ : Optional[int] = (predicted_variance + 1) / 2
UpperCAmelCase__ : Any = frac * max_log + (1 - frac) * min_log
return variance
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = True , )-> Union[FlaxDDPMSchedulerOutput, Tuple]:
UpperCAmelCase__ : List[str] = timestep
if key is None:
UpperCAmelCase__ : int = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = jnp.split(__UpperCamelCase , sample.shape[1] , axis=1 )
else:
UpperCAmelCase__ : Optional[Any] = None
# 1. compute alphas, betas
UpperCAmelCase__ : Union[str, Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
UpperCAmelCase__ : Union[str, Any] = 1 - alpha_prod_t
UpperCAmelCase__ : Tuple = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCAmelCase__ : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCAmelCase__ : Any = model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCAmelCase__ : List[Any] = jnp.clip(__UpperCamelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : List[str] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
UpperCAmelCase__ : List[Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : Tuple = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
UpperCAmelCase__ : Any = jax.random.split(__UpperCamelCase , num=1 )
UpperCAmelCase__ : int = jax.random.normal(__UpperCamelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__UpperCamelCase , __UpperCamelCase , predicted_variance=__UpperCamelCase ) ** 0.5) * noise
UpperCAmelCase__ : Dict = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
UpperCAmelCase__ : Any = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__UpperCamelCase , state=__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> jnp.ndarray:
return add_noise_common(state.common , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> jnp.ndarray:
return get_velocity_common(state.common , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def __len__( self )-> Tuple:
return self.config.num_train_timesteps
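# Hedged illustration (not part of the original scheduler file): one DDPM
# reverse step under `prediction_type == "epsilon"`, mirroring formulas (7) and
# (15) of https://arxiv.org/pdf/2006.11239.pdf that the comments above cite.
# The toy schedule and every name below are hypothetical.
import jax.numpy as jnp

betas = jnp.linspace(1e-4, 2e-2, 10)          # toy noise schedule
alphas = 1.0 - betas
alphas_cumprod = jnp.cumprod(alphas)

t = 5
alpha_prod_t = alphas_cumprod[t]
alpha_prod_t_prev = alphas_cumprod[t - 1]
beta_prod_t = 1.0 - alpha_prod_t
beta_prod_t_prev = 1.0 - alpha_prod_t_prev

x_t = jnp.ones((4,))                          # current noisy sample
eps_hat = 0.1 * jnp.ones((4,))                # the model's noise prediction

x0_hat = (x_t - beta_prod_t**0.5 * eps_hat) / alpha_prod_t**0.5   # formula (15)
coeff_x0 = (alpha_prod_t_prev**0.5 * betas[t]) / beta_prod_t      # formula (7)
coeff_xt = alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
mu_t = coeff_x0 * x0_hat + coeff_xt * x_t     # predicted previous sample mean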
| 660 | 0 |
from math import isqrt
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
return all(number % divisor != 0 for divisor in range(2 , isqrt(snake_case__ ) + 1 ) )
def a__ ( lowerCAmelCase : int = 10**6 ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : List[Any] = 1
UpperCAmelCase__ : str = 7
while prime_candidate < max_prime:
primes_count += is_prime(snake_case__ )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
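# Hedged aside (illustration only): the candidates generated above are the
# differences of consecutive cubes, (i + 1)**3 - i**3 == 3*i*i + 3*i + 1, which
# the loop produces incrementally via `prime_candidate += 6 * cube_index`.
for i in range(1, 5):
    assert (i + 1) ** 3 - i**3 == 3 * i * i + 3 * i + 1   # 7, 19, 37, 61, ...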
if __name__ == "__main__":
print(f"""{solution() = }""")
| 706 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Any, List, Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ''
_A = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> str:
super().__init__(self , **__UpperCamelCase )
UpperCAmelCase__ : int = repo_info
UpperCAmelCase__ : Optional[int] = token
UpperCAmelCase__ : Optional[Any] = None
def lowerCAmelCase__ ( self )-> Optional[Any]:
if self.dir_cache is None:
UpperCAmelCase__ : str = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
UpperCAmelCase__ : str = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(__UpperCamelCase ): {"name": str(__UpperCamelCase ), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = "rb" , **__UpperCamelCase , )-> List[Any]:
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
UpperCAmelCase__ : Union[str, Any] = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
def lowerCAmelCase__ ( self , __UpperCamelCase , **__UpperCamelCase )-> List[str]:
self._get_dirs()
UpperCAmelCase__ : Union[str, Any] = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=False , **__UpperCamelCase )-> str:
self._get_dirs()
UpperCAmelCase__ : str = PurePosixPath(path.strip("/" ) )
UpperCAmelCase__ : Optional[Any] = {}
for p, f in self.dir_cache.items():
UpperCAmelCase__ : Optional[int] = PurePosixPath(p.strip("/" ) )
UpperCAmelCase__ : Dict = p.parent
if root == path:
UpperCAmelCase__ : Tuple = f
UpperCAmelCase__ : List[Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out )
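# Hedged illustration (standalone, not part of the class above): how the
# directory cache is derived from flat repo filenames. Every parent of a file
# path becomes a synthetic "directory" entry; `siblings` below is hypothetical.
from pathlib import PurePosixPath

siblings = ["data/train/part-0.parquet", "README.md"]
dir_cache = {}
for rfilename in siblings:
    dir_cache[rfilename] = {"name": rfilename, "size": None, "type": "file"}
    for d in list(PurePosixPath(rfilename).parents)[:-1]:
        dir_cache[str(d)] = {"name": str(d), "size": None, "type": "directory"}

assert dir_cache["data/train"]["type"] == "directory"
assert dir_cache["README.md"]["type"] == "file"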
| 660 | 0 |
from typing import Optional

def a__ ( lowerCAmelCase : Optional[int] ):
    '''simple docstring'''
    if not isinstance(__snake_case , int ):
raise ValueError("Input must be an integer" )
if input_num <= 0:
raise ValueError("Input must be positive" )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
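# Hedged aside (illustration only): an O(sqrt(n)) variant of the proper-divisor
# sum above, pairing each divisor d <= sqrt(n) with its cofactor n // d. The
# name `sum_proper_divisors_fast` is hypothetical.
from math import isqrt

def sum_proper_divisors_fast(n: int) -> int:
    total = 1 if n > 1 else 0            # 1 divides every n > 1
    for d in range(2, isqrt(n) + 1):
        if n % d == 0:
            total += d
            if d != n // d:              # avoid double-counting square roots
                total += n // d
    return total

assert sum_proper_divisors_fast(28) == 1 + 2 + 4 + 7 + 14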
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707 |
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
A__ : Dict = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
if isinstance(lowerCAmelCase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(lowerCAmelCase , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(lowerCAmelCase ):
return [[videos]]
raise ValueError(F"Could not make batched video from {videos}" )
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ['pixel_values']
def __init__( self , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = 1 / 2_55 , __UpperCamelCase = True , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> None:
super().__init__(**__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = size if size is not None else {"shortest_edge": 2_56}
UpperCAmelCase__ : List[Any] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
UpperCAmelCase__ : List[str] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
UpperCAmelCase__ : int = get_size_dict(__UpperCamelCase , param_name="crop_size" )
UpperCAmelCase__ : Dict = do_resize
UpperCAmelCase__ : Optional[int] = size
UpperCAmelCase__ : List[Any] = do_center_crop
UpperCAmelCase__ : str = crop_size
UpperCAmelCase__ : Optional[int] = resample
UpperCAmelCase__ : int = do_rescale
UpperCAmelCase__ : Union[str, Any] = rescale_factor
UpperCAmelCase__ : Union[str, Any] = offset
UpperCAmelCase__ : Dict = do_normalize
UpperCAmelCase__ : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase__ : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
UpperCAmelCase__ : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
if "shortest_edge" in size:
UpperCAmelCase__ : Union[str, Any] = get_resize_output_image_size(__UpperCamelCase , size["shortest_edge"] , default_to_square=__UpperCamelCase )
elif "height" in size and "width" in size:
UpperCAmelCase__ : Any = (size["height"], size["width"])
else:
raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
UpperCAmelCase__ : Optional[Any] = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(__UpperCamelCase , size=(size["height"], size["width"]) , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = True , __UpperCamelCase = None , **__UpperCamelCase , )-> Tuple:
UpperCAmelCase__ : str = image.astype(np.floataa )
if offset:
UpperCAmelCase__ : Tuple = image - (scale / 2)
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , )-> np.ndarray:
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
UpperCAmelCase__ : Optional[Any] = to_numpy_array(__UpperCamelCase )
if do_resize:
UpperCAmelCase__ : Union[str, Any] = self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase )
if do_center_crop:
UpperCAmelCase__ : int = self.center_crop(__UpperCamelCase , size=__UpperCamelCase )
if do_rescale:
UpperCAmelCase__ : List[str] = self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase , offset=__UpperCamelCase )
if do_normalize:
UpperCAmelCase__ : List[Any] = self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase )
UpperCAmelCase__ : Dict = to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase )
return image
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , **__UpperCamelCase , )-> PIL.Image.Image:
UpperCAmelCase__ : Optional[int] = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ : int = resample if resample is not None else self.resample
UpperCAmelCase__ : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ : int = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : Optional[int] = offset if offset is not None else self.offset
UpperCAmelCase__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ : Dict = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ : Optional[int] = image_std if image_std is not None else self.image_std
UpperCAmelCase__ : List[str] = size if size is not None else self.size
UpperCAmelCase__ : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
UpperCAmelCase__ : Dict = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ : Tuple = get_size_dict(__UpperCamelCase , param_name="crop_size" )
if not valid_images(__UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase__ : List[str] = make_batched(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = [
[
self._preprocess_image(
image=__UpperCamelCase , do_resize=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , do_center_crop=__UpperCamelCase , crop_size=__UpperCamelCase , do_rescale=__UpperCamelCase , rescale_factor=__UpperCamelCase , offset=__UpperCamelCase , do_normalize=__UpperCamelCase , image_mean=__UpperCamelCase , image_std=__UpperCamelCase , data_format=__UpperCamelCase , )
for img in video
]
for video in videos
]
UpperCAmelCase__ : Dict = {"pixel_values": videos}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
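# Hedged aside (illustration only): the per-frame arithmetic of the rescale and
# normalize stages above, mirrored in plain numpy with toy parameters. The
# variable names are hypothetical; `rescale` in the class multiplies by the
# scale factor after the optional offset subtraction.
import numpy as np

scale = 1 / 255
frame = np.random.randint(0, 256, (224, 224, 3)).astype(np.float32)
rescaled = (frame - scale / 2) * scale                  # offset, then rescale
mean = np.array(IMAGENET_STANDARD_MEAN, dtype=np.float32)
std = np.array(IMAGENET_STANDARD_STD, dtype=np.float32)
normalized = (rescaled - mean) / std                    # per-channel normalize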
| 660 | 0 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _lowercase ( _UpperCAmelCase ):
'''simple docstring'''
@staticmethod
@abstractmethod
def lowerCAmelCase__ ( __UpperCamelCase )-> List[str]:
raise NotImplementedError()
@abstractmethod
def lowerCAmelCase__ ( self )-> Tuple:
raise NotImplementedError()
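# Hedged sketch (illustration only): the shape of a concrete command under this
# interface, a static registration hook plus an instance `run`. All names below
# are hypothetical; ABC is subclassed directly because the abstract base in
# this dump carries obfuscated method names.
class EchoCommand(ABC):
    @staticmethod
    def register_subcommand(subparsers):
        parser = subparsers.add_parser("echo")
        parser.add_argument("text")
        parser.set_defaults(factory=lambda args: EchoCommand(args.text))

    def __init__(self, text):
        self._text = text

    def run(self):
        print(self._text)


root = ArgumentParser()
EchoCommand.register_subcommand(root.add_subparsers())
args = root.parse_args(["echo", "hi"])
args.factory(args).run()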
| 708 |
"""simple docstring"""
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if a < 0:
raise ValueError("Input value must be a positive integer" )
    elif isinstance(lowerCAmelCase , float ):
raise TypeError("Input value must be a 'int' type" )
return bin(lowerCAmelCase ).count("1" )
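# Hedged aside (illustration only): Brian Kernighan's method counts set bits in
# O(popcount) iterations by clearing the lowest set bit each step; it agrees
# with the bin(...).count("1") approach above. The name below is hypothetical.
def kernighan_popcount(n: int) -> int:
    count = 0
    while n:
        n &= n - 1   # drop the lowest set bit
        count += 1
    return count

assert kernighan_popcount(0b101101) == bin(0b101101).count("1") == 4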
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 | 0 |
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
A__ : Optional[int] = logging.get_logger(__name__)
# General docstring
A__ : Tuple = '''RegNetConfig'''
# Base docstring
A__ : Any = '''facebook/regnet-y-040'''
A__ : int = [1, 1_088, 7, 7]
# Image classification docstring
A__ : Any = '''facebook/regnet-y-040'''
A__ : Union[str, Any] = '''tabby, tabby cat'''
A__ : Optional[Any] = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class _lowercase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase = 3 , __UpperCamelCase = 1 , __UpperCamelCase = 1 , __UpperCamelCase = "relu" , **__UpperCamelCase , )-> List[Any]:
super().__init__(**lowerCAmelCase__ )
        # The padding and conv have been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
UpperCAmelCase__ : Optional[int] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
UpperCAmelCase__ : Optional[Any] = tf.keras.layers.ConvaD(
filters=lowerCAmelCase__ , kernel_size=lowerCAmelCase__ , strides=lowerCAmelCase__ , padding="VALID" , groups=lowerCAmelCase__ , use_bias=lowerCAmelCase__ , name="convolution" , )
UpperCAmelCase__ : str = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
UpperCAmelCase__ : Dict = ACTaFN[activation] if activation is not None else tf.identity
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Optional[int]:
UpperCAmelCase__ : Any = self.convolution(self.padding(lowerCAmelCase__ ) )
UpperCAmelCase__ : Optional[int] = self.normalization(lowerCAmelCase__ )
UpperCAmelCase__ : List[str] = self.activation(lowerCAmelCase__ )
return hidden_state
class _lowercase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , **__UpperCamelCase )-> int:
super().__init__(**lowerCAmelCase__ )
UpperCAmelCase__ : Tuple = config.num_channels
UpperCAmelCase__ : Optional[Any] = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Optional[int]:
UpperCAmelCase__ : Union[str, Any] = shape_list(lowerCAmelCase__ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
UpperCAmelCase__ : List[str] = tf.transpose(lowerCAmelCase__ , perm=(0, 2, 3, 1) )
UpperCAmelCase__ : Optional[Any] = self.embedder(lowerCAmelCase__ )
return hidden_state
class _lowercase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase = 2 , **__UpperCamelCase )-> List[str]:
super().__init__(**lowerCAmelCase__ )
UpperCAmelCase__ : Union[str, Any] = tf.keras.layers.ConvaD(
filters=lowerCAmelCase__ , kernel_size=1 , strides=lowerCAmelCase__ , use_bias=lowerCAmelCase__ , name="convolution" )
UpperCAmelCase__ : Dict = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = False )-> tf.Tensor:
return self.normalization(self.convolution(lowerCAmelCase__ ) , training=lowerCAmelCase__ )
class _lowercase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase )-> Optional[int]:
super().__init__(**lowerCAmelCase__ )
UpperCAmelCase__ : str = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCAmelCase__ , name="pooler" )
UpperCAmelCase__ : Optional[int] = [
tf.keras.layers.ConvaD(filters=lowerCAmelCase__ , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.ConvaD(filters=lowerCAmelCase__ , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Union[str, Any]:
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
UpperCAmelCase__ : Dict = self.pooler(lowerCAmelCase__ )
for layer_module in self.attention:
UpperCAmelCase__ : Optional[Any] = layer_module(lowerCAmelCase__ )
UpperCAmelCase__ : int = hidden_state * pooled
return hidden_state
class _lowercase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 , **__UpperCamelCase )-> int:
super().__init__(**lowerCAmelCase__ )
UpperCAmelCase__ : Optional[int] = in_channels != out_channels or stride != 1
UpperCAmelCase__ : Dict = max(1 , out_channels // config.groups_width )
UpperCAmelCase__ : Optional[Any] = (
TFRegNetShortCut(lowerCAmelCase__ , stride=lowerCAmelCase__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
UpperCAmelCase__ : Dict = [
TFRegNetConvLayer(lowerCAmelCase__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
lowerCAmelCase__ , stride=lowerCAmelCase__ , groups=lowerCAmelCase__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(lowerCAmelCase__ , kernel_size=1 , activation=lowerCAmelCase__ , name="layer.2" ),
]
UpperCAmelCase__ : List[str] = ACTaFN[config.hidden_act]
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Optional[Any]:
UpperCAmelCase__ : Any = hidden_state
for layer_module in self.layers:
UpperCAmelCase__ : Tuple = layer_module(lowerCAmelCase__ )
UpperCAmelCase__ : Dict = self.shortcut(lowerCAmelCase__ )
hidden_state += residual
UpperCAmelCase__ : int = self.activation(lowerCAmelCase__ )
return hidden_state
class _lowercase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 , **__UpperCamelCase )-> List[Any]:
super().__init__(**lowerCAmelCase__ )
UpperCAmelCase__ : List[str] = in_channels != out_channels or stride != 1
UpperCAmelCase__ : Any = max(1 , out_channels // config.groups_width )
UpperCAmelCase__ : str = (
TFRegNetShortCut(lowerCAmelCase__ , stride=lowerCAmelCase__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
UpperCAmelCase__ : Optional[int] = [
TFRegNetConvLayer(lowerCAmelCase__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
lowerCAmelCase__ , stride=lowerCAmelCase__ , groups=lowerCAmelCase__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(lowerCAmelCase__ , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(lowerCAmelCase__ , kernel_size=1 , activation=lowerCAmelCase__ , name="layer.3" ),
]
UpperCAmelCase__ : Union[str, Any] = ACTaFN[config.hidden_act]
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Any:
UpperCAmelCase__ : Union[str, Any] = hidden_state
for layer_module in self.layers:
UpperCAmelCase__ : str = layer_module(lowerCAmelCase__ )
UpperCAmelCase__ : Union[str, Any] = self.shortcut(lowerCAmelCase__ )
hidden_state += residual
UpperCAmelCase__ : Optional[int] = self.activation(lowerCAmelCase__ )
return hidden_state
class _lowercase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 2 , __UpperCamelCase = 2 , **__UpperCamelCase )-> List[Any]:
super().__init__(**lowerCAmelCase__ )
UpperCAmelCase__ : Tuple = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
UpperCAmelCase__ : Optional[Any] = [
# downsampling is done in the first layer with stride of 2
layer(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ , name="layers.0" ),
*[layer(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , name=F"layers.{i+1}" ) for i in range(depth - 1 )],
]
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
for layer_module in self.layers:
UpperCAmelCase__ : Dict = layer_module(lowerCAmelCase__ )
return hidden_state
class _lowercase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , **__UpperCamelCase )-> int:
super().__init__(**lowerCAmelCase__ )
UpperCAmelCase__ : str = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
lowerCAmelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
UpperCAmelCase__ : Optional[Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(lowerCAmelCase__ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , depth=lowerCAmelCase__ , name=F"stages.{i+1}" ) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = False , __UpperCamelCase = True )-> TFBaseModelOutputWithNoAttention:
UpperCAmelCase__ : int = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
UpperCAmelCase__ : Optional[int] = hidden_states + (hidden_state,)
UpperCAmelCase__ : Any = stage_module(lowerCAmelCase__ )
if output_hidden_states:
UpperCAmelCase__ : Union[str, Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=lowerCAmelCase__ , hidden_states=lowerCAmelCase__ )
@keras_serializable
class _lowercase ( tf.keras.layers.Layer ):
'''simple docstring'''
_A = RegNetConfig
def __init__( self , __UpperCamelCase , **__UpperCamelCase )-> Union[str, Any]:
super().__init__(**lowerCAmelCase__ )
UpperCAmelCase__ : int = config
UpperCAmelCase__ : str = TFRegNetEmbeddings(lowerCAmelCase__ , name="embedder" )
UpperCAmelCase__ : Union[str, Any] = TFRegNetEncoder(lowerCAmelCase__ , name="encoder" )
UpperCAmelCase__ : Union[str, Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCAmelCase__ , name="pooler" )
@unpack_inputs
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , )-> TFBaseModelOutputWithPoolingAndNoAttention:
UpperCAmelCase__ : Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCAmelCase__ : int = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase__ : Any = self.embedder(lowerCAmelCase__ , training=lowerCAmelCase__ )
UpperCAmelCase__ : Tuple = self.encoder(
lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , training=lowerCAmelCase__ )
UpperCAmelCase__ : int = encoder_outputs[0]
UpperCAmelCase__ : List[Any] = self.pooler(lowerCAmelCase__ )
        # Change to NCHW output format to keep uniformity across the modules
UpperCAmelCase__ : Tuple = tf.transpose(lowerCAmelCase__ , perm=(0, 3, 1, 2) )
UpperCAmelCase__ : Optional[Any] = tf.transpose(lowerCAmelCase__ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
UpperCAmelCase__ : Tuple = tuple([tf.transpose(lowerCAmelCase__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCAmelCase__ , pooler_output=lowerCAmelCase__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class _lowercase ( a__ ):
'''simple docstring'''
_A = RegNetConfig
_A = """regnet"""
_A = """pixel_values"""
@property
def lowerCAmelCase__ ( self )-> str:
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) , dtype=tf.floataa )}
A__ : Optional[Any] = R'''
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matters related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''
A__ : Dict = R'''
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , a__ , )
class _lowercase ( a__ ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )-> List[str]:
super().__init__(lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ )
UpperCAmelCase__ : Union[str, Any] = TFRegNetMainLayer(lowerCAmelCase__ , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase=False , )-> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
UpperCAmelCase__ : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCAmelCase__ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase__ : List[str] = self.regnet(
pixel_values=lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , training=lowerCAmelCase__ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , a__ , )
class _lowercase ( a__ , a__ ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )-> str:
super().__init__(lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ )
UpperCAmelCase__ : Optional[Any] = config.num_labels
UpperCAmelCase__ : Any = TFRegNetMainLayer(lowerCAmelCase__ , name="regnet" )
# classification head
UpperCAmelCase__ : List[str] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase__ ( self , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase=False , )-> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
UpperCAmelCase__ : Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCAmelCase__ : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase__ : Union[str, Any] = self.regnet(
lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , training=lowerCAmelCase__ )
UpperCAmelCase__ : Dict = outputs.pooler_output if return_dict else outputs[1]
UpperCAmelCase__ : List[Any] = self.classifier[0](lowerCAmelCase__ )
UpperCAmelCase__ : Dict = self.classifier[1](lowerCAmelCase__ )
UpperCAmelCase__ : Tuple = None if labels is None else self.hf_compute_loss(labels=lowerCAmelCase__ , logits=lowerCAmelCase__ )
if not return_dict:
UpperCAmelCase__ : str = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=lowerCAmelCase__ , logits=lowerCAmelCase__ , hidden_states=outputs.hidden_states )
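# Hedged illustration (standalone, not part of the original module): the
# squeeze-and-excitation gating used by the Y-layer above: global average pool,
# two 1x1 convolutions (ReLU then sigmoid), then channel-wise re-weighting.
# Toy sizes; all names below are hypothetical.
import tensorflow as tf

x = tf.random.normal((1, 14, 14, 64))                        # NHWC feature map
pool = tf.keras.layers.GlobalAveragePooling2D(keepdims=True)
squeeze = tf.keras.layers.Conv2D(16, kernel_size=1, activation="relu")
excite = tf.keras.layers.Conv2D(64, kernel_size=1, activation="sigmoid")

pooled = pool(x)                                             # (1, 1, 1, 64)
gate = excite(squeeze(pooled))                               # per-channel weights
y = x * gate                                                 # re-weighted features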
| 709 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Any, Callable, Dict, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
A__ : Optional[Any] = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : bool , lowerCAmelCase : bool ):
'''simple docstring'''
def run_func(lowerCAmelCase : Dict ):
@wraps(lowerCAmelCase )
def run_in_eager_mode(*lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Dict ):
return func(*lowerCAmelCase , **lowerCAmelCase )
@wraps(lowerCAmelCase )
@tf.function(experimental_compile=lowerCAmelCase )
def run_in_graph_mode(*lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Optional[Any] ):
return func(*lowerCAmelCase , **lowerCAmelCase )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def a__ ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int ):
'''simple docstring'''
UpperCAmelCase__ : Dict = random.Random()
UpperCAmelCase__ : List[str] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(lowerCAmelCase , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
_A = 42
_A = "TensorFlow"
@property
def lowerCAmelCase__ ( self )-> Optional[int]:
return tf.__version__
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> float:
# initialize GPU on separate process
UpperCAmelCase__ : Any = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Union[str, Any] = self._prepare_inference_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_speed(_inference )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> float:
UpperCAmelCase__ : List[Any] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : List[Any] = self._prepare_train_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_speed(_train )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> [Memory, Optional[MemorySummary]]:
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCamelCase )
UpperCAmelCase__ : List[str] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Any = self._prepare_inference_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_memory(_inference )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> [Memory, Optional[MemorySummary]]:
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCamelCase )
UpperCAmelCase__ : Any = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Optional[Any] = self._prepare_train_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_memory(_train )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Callable[[], None]:
UpperCAmelCase__ : Union[str, Any] = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCAmelCase__ : Optional[int] = (
hasattr(__UpperCamelCase , "architectures" )
and isinstance(config.architectures , __UpperCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase__ : str = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase__ : Any = __import__("transformers" , fromlist=[model_class] )
UpperCAmelCase__ : List[Any] = getattr(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Dict = model_cls(__UpperCamelCase )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCAmelCase__ : Any = TF_MODEL_MAPPING[config.__class__](__UpperCamelCase )
# encoder-decoder has vocab size saved differently
UpperCAmelCase__ : int = config.vocab_size if hasattr(__UpperCamelCase , "vocab_size" ) else config.encoder.vocab_size
UpperCAmelCase__ : Optional[Any] = random_input_ids(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(__UpperCamelCase , decoder_input_ids=__UpperCamelCase , training=__UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(__UpperCamelCase , training=__UpperCamelCase )
UpperCAmelCase__ : Dict = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Callable[[], None]:
UpperCAmelCase__ : List[Any] = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCAmelCase__ : Any = (
hasattr(__UpperCamelCase , "architectures" )
and isinstance(config.architectures , __UpperCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase__ : Any = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase__ : int = __import__("transformers" , fromlist=[model_class] )
UpperCAmelCase__ : int = getattr(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = model_cls(__UpperCamelCase )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCAmelCase__ : List[str] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__UpperCamelCase )
# encoder-decoder has vocab size saved differently
UpperCAmelCase__ : Union[str, Any] = config.vocab_size if hasattr(__UpperCamelCase , "vocab_size" ) else config.encoder.vocab_size
UpperCAmelCase__ : Dict = random_input_ids(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
UpperCAmelCase__ : Union[str, Any] = model(__UpperCamelCase , decoder_input_ids=__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase )[0]
UpperCAmelCase__ : Union[str, Any] = tf.gradients(__UpperCamelCase , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
UpperCAmelCase__ : Union[str, Any] = model(__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase )[0]
UpperCAmelCase__ : Any = tf.gradients(__UpperCamelCase , model.trainable_variables )
return gradients
UpperCAmelCase__ : str = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def lowerCAmelCase__ ( self , __UpperCamelCase )-> float:
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
timeit.repeat(__UpperCamelCase , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
UpperCAmelCase__ : Optional[Any] = timeit.repeat(
__UpperCamelCase , repeat=self.args.repeat , number=10 , )
return min(__UpperCamelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> [Memory, MemorySummary]:
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
UpperCAmelCase__ : List[str] = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
UpperCAmelCase__ : Optional[int] = "N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
UpperCAmelCase__ : Any = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
UpperCAmelCase__ : Optional[int] = nvml.nvmlDeviceGetMemoryInfo(__UpperCamelCase )
UpperCAmelCase__ : str = meminfo.used
UpperCAmelCase__ : int = Memory(__UpperCamelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
UpperCAmelCase__ : Any = None
else:
UpperCAmelCase__ : List[Any] = measure_peak_memory_cpu(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = Memory(__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
UpperCAmelCase__ : Optional[Any] = stop_memory_tracing(__UpperCamelCase )
if memory is None:
UpperCAmelCase__ : Tuple = summary.total
else:
UpperCAmelCase__ : int = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
return "N/A", None
| 660 | 0 |
"""simple docstring"""
def a__ ( lowerCAmelCase : str , lowerCAmelCase : int ):
'''simple docstring'''
return [sentence[i : i + ngram_size] for i in range(len(UpperCAmelCase__ ) - ngram_size + 1 )]
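# Hedged aside (illustration only): a standalone equivalent of the slicing
# helper above, with hypothetical names, producing character n-grams.
def char_ngrams(sentence: str, ngram_size: int) -> list:
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]

assert char_ngrams("abcde", 3) == ["abc", "bcd", "cde"]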
if __name__ == "__main__":
from doctest import testmod
testmod()
| 710 |
"""simple docstring"""
from typing import TYPE_CHECKING, Any, List
from ....utils import _LazyModule
A__ : List[str] = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
A__ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 660 | 0 |
"""simple docstring"""
from math import ceil
from typing import Any, Tuple, Union
def a__ ( lowerCAmelCase : Tuple = 1001 ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = 1
for i in range(1 , int(ceil(n / 2.0 ) ) ):
UpperCAmelCase__ : Optional[Any] = 2 * i + 1
UpperCAmelCase__ : Tuple = 2 * i
UpperCAmelCase__ : List[Any] = total + 4 * odd**2 - 6 * even
return total
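# Hedged aside (illustration only): for ring i of the spiral, the four diagonal
# corners are (2*i + 1)**2 - 2*i*k for k = 0..3, so their sum is
# 4*(2*i + 1)**2 - 12*i, which the loop above accumulates as 4*odd**2 - 6*even.
for i in range(1, 4):
    corners = [(2 * i + 1) ** 2 - 2 * i * k for k in range(4)]
    assert sum(corners) == 4 * (2 * i + 1) ** 2 - 6 * (2 * i)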
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
A__ : Union[str, Any] = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
| 711 |
"""simple docstring"""
from typing import Any, List, Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
@register_to_config
def __init__( self , __UpperCamelCase = 7_68 , )-> Union[str, Any]:
super().__init__()
UpperCAmelCase__ : str = nn.Parameter(torch.zeros(1 , __UpperCamelCase ) )
UpperCAmelCase__ : Optional[int] = nn.Parameter(torch.ones(1 , __UpperCamelCase ) )
def lowerCAmelCase__ ( self , __UpperCamelCase = None , __UpperCamelCase = None , )-> Any:
UpperCAmelCase__ : Dict = nn.Parameter(self.mean.to(__UpperCamelCase ).to(__UpperCamelCase ) )
UpperCAmelCase__ : Any = nn.Parameter(self.std.to(__UpperCamelCase ).to(__UpperCamelCase ) )
return self
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : Dict = (embeds - self.mean) * 1.0 / self.std
return embeds
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[Any]:
UpperCAmelCase__ : Any = (embeds * self.std) + self.mean
return embeds
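# Hedged usage sketch (illustration only): the scale/unscale pair above is an
# affine round trip, (x - mean) / std followed by x * std + mean. Standalone
# torch check with toy statistics; the names below are hypothetical.
import torch

mean, std = torch.zeros(1, 4), torch.ones(1, 4) * 2.0
embeds = torch.randn(3, 4)
scaled = (embeds - mean) * 1.0 / std
restored = (scaled * std) + mean
assert torch.allclose(restored, embeds, atol=1e-6)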
| 660 | 0 |
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from typing import Any, List, Optional, Tuple, Union
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
A__ : int = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[int]=None ):
'''simple docstring'''
# Recurse if needed
if "." in tensor_name:
UpperCAmelCase__ : Dict = tensor_name.split("." )
for split in splits[:-1]:
UpperCAmelCase__ : int = getattr(_lowercase , _lowercase )
if new_module is None:
raise ValueError(F"{module} has no attribute {split}." )
UpperCAmelCase__ : List[str] = new_module
UpperCAmelCase__ : Dict = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F"{module} does not have a parameter or a buffer named {tensor_name}." )
UpperCAmelCase__ : Union[str, Any] = tensor_name in module._buffers
UpperCAmelCase__ : Optional[Any] = getattr(_lowercase , _lowercase )
if old_value.device == torch.device("meta" ) and device not in ["meta", torch.device("meta" )] and value is None:
raise ValueError(F"{tensor_name} is on the meta device, we need a `value` to put in on {device}." )
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Dict = False
if is_buffer or not is_bitsandbytes_available():
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : List[str] = False
else:
UpperCAmelCase__ : Dict = hasattr(bnb.nn , "Params4bit" ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
UpperCAmelCase__ : Any = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
if is_abit or is_abit:
UpperCAmelCase__ : List[str] = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
UpperCAmelCase__ : Any = old_value.to(_lowercase )
elif isinstance(_lowercase , torch.Tensor ):
UpperCAmelCase__ : List[Any] = value.to("cpu" )
if value.dtype == torch.inta:
UpperCAmelCase__ : List[Any] = version.parse(importlib.metadata.version("bitsandbytes" ) ) > version.parse(
"0.37.2" )
if not is_abit_serializable:
raise ValueError(
"Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
"Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`." )
else:
UpperCAmelCase__ : Dict = torch.tensor(_lowercase , device="cpu" )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , _lowercase ) and fpaa_statistics is None:
UpperCAmelCase__ : Optional[int] = new_value.T
UpperCAmelCase__ : Dict = old_value.__dict__
if is_abit:
UpperCAmelCase__ : List[Any] = bnb.nn.IntaParams(_lowercase , requires_grad=_lowercase , **_lowercase ).to(_lowercase )
elif is_abit:
UpperCAmelCase__ : str = bnb.nn.Paramsabit(_lowercase , requires_grad=_lowercase , **_lowercase ).to(_lowercase )
UpperCAmelCase__ : Tuple = new_value
if fpaa_statistics is not None:
setattr(module.weight , "SCB" , fpaa_statistics.to(_lowercase ) )
else:
if value is None:
UpperCAmelCase__ : Union[str, Any] = old_value.to(_lowercase )
elif isinstance(_lowercase , torch.Tensor ):
UpperCAmelCase__ : List[Any] = value.to(_lowercase )
else:
UpperCAmelCase__ : List[Any] = torch.tensor(_lowercase , device=_lowercase )
if is_buffer:
UpperCAmelCase__ : List[Any] = new_value
else:
UpperCAmelCase__ : Dict = nn.Parameter(_lowercase , requires_grad=old_value.requires_grad )
UpperCAmelCase__ : Dict = new_value
def a__ ( lowerCAmelCase : str , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : str=False ):
'''simple docstring'''
for name, module in model.named_children():
if current_key_name is None:
UpperCAmelCase__ : List[str] = []
current_key_name.append(_lowercase )
if (isinstance(_lowercase , nn.Linear ) or isinstance(_lowercase , _lowercase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in ".".join(_lowercase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(_lowercase , _lowercase ):
UpperCAmelCase__ : Optional[Any] = module.weight.shape
else:
UpperCAmelCase__ : Tuple = module.in_features
UpperCAmelCase__ : Optional[int] = module.out_features
if quantization_config.quantization_method() == "llm_int8":
UpperCAmelCase__ : str = bnb.nn.LinearabitLt(
_lowercase , _lowercase , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
UpperCAmelCase__ : Optional[int] = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
UpperCAmelCase__ : List[Any] = bnb.nn.Linearabit(
_lowercase , _lowercase , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
UpperCAmelCase__ : int = True
# Store the module class in case we need to transpose the weight later
UpperCAmelCase__ : Union[str, Any] = type(_lowercase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(_lowercase )
if len(list(module.children() ) ) > 0:
UpperCAmelCase__ : Union[str, Any] = _replace_with_bnb_linear(
_lowercase , _lowercase , _lowercase , _lowercase , has_been_replaced=_lowercase , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def a__ ( lowerCAmelCase : int , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : int=None ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
UpperCAmelCase__ : Union[str, Any] = _replace_with_bnb_linear(
_lowercase , _lowercase , _lowercase , _lowercase )
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug." )
return model
def a__ ( *lowerCAmelCase : List[str] , **lowerCAmelCase : int ):
'''simple docstring'''
warnings.warn(
"`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead" , _lowercase , )
return replace_with_bnb_linear(*_lowercase , **_lowercase )
def a__ ( *lowerCAmelCase : int , **lowerCAmelCase : List[Any] ):
'''simple docstring'''
warnings.warn(
"`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead" , _lowercase , )
return set_module_quantized_tensor_to_device(*_lowercase , **_lowercase )
def a__ ( lowerCAmelCase : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = deepcopy(_lowercase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
UpperCAmelCase__ : str = find_tied_parameters(_lowercase )
# For compatibility with Accelerate < 0.18
if isinstance(_lowercase , _lowercase ):
UpperCAmelCase__ : Union[str, Any] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
UpperCAmelCase__ : Optional[int] = sum(_lowercase , [] )
UpperCAmelCase__ : Optional[int] = len(_lowercase ) > 0
# Check if it is a base model
UpperCAmelCase__ : List[Any] = not hasattr(_lowercase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCAmelCase__ : List[Any] = list(model.named_children() )
UpperCAmelCase__ : Tuple = [list_modules[-1][0]]
# add last module together with tied weights
UpperCAmelCase__ : Union[str, Any] = set(_lowercase ) - set(_lowercase )
UpperCAmelCase__ : List[Any] = list(set(_lowercase ) ) + list(_lowercase )
# remove ".weight" from the keys
UpperCAmelCase__ : int = ['''.weight''', '''.bias''']
UpperCAmelCase__ : Union[str, Any] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCAmelCase__ : int = name.replace(_lowercase , "" )
filtered_module_names.append(_lowercase )
return filtered_module_names
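# Hedged aside (illustration only): the dotted-name traversal used at the top of
# the tensor-setting helper above: split "a.b.c", getattr through all but the
# last segment, then act on the leaf. Standalone sketch with hypothetical names.
import torch.nn as nn

model = nn.Sequential(nn.Linear(2, 2))
tensor_name = "0.weight"

module = model
*parents, leaf = tensor_name.split(".")
for part in parents:
    module = getattr(module, part)
assert leaf in module._parameters   # "weight" names a parameter of the Linear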
| 712 |
"""simple docstring"""
import argparse
from typing import Any, List, Optional
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : List[Any] ):
'''simple docstring'''
# Construct model
if gpta_config_file == "":
UpperCAmelCase__ : Optional[int] = GPTaConfig()
else:
UpperCAmelCase__ : Dict = GPTaConfig.from_json_file(lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = GPTaModel(lowerCAmelCase )
# Load weights from numpy
load_tf_weights_in_gpta(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Save pytorch-model
UpperCAmelCase__ : Optional[int] = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
UpperCAmelCase__ : Any = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(F"Save PyTorch model to {pytorch_weights_dump_path}" )
torch.save(model.state_dict() , lowerCAmelCase )
print(F"Save configuration file to {pytorch_config_dump_path}" )
with open(lowerCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
A__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
A__ : Optional[Any] = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 660 | 0 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
A__ : str = logging.get_logger(__name__)
A__ : Dict = OrderedDict(
[
("""audio-spectrogram-transformer""", """ASTFeatureExtractor"""),
("""beit""", """BeitFeatureExtractor"""),
("""chinese_clip""", """ChineseCLIPFeatureExtractor"""),
("""clap""", """ClapFeatureExtractor"""),
("""clip""", """CLIPFeatureExtractor"""),
("""clipseg""", """ViTFeatureExtractor"""),
("""conditional_detr""", """ConditionalDetrFeatureExtractor"""),
("""convnext""", """ConvNextFeatureExtractor"""),
("""cvt""", """ConvNextFeatureExtractor"""),
("""data2vec-audio""", """Wav2Vec2FeatureExtractor"""),
("""data2vec-vision""", """BeitFeatureExtractor"""),
("""deformable_detr""", """DeformableDetrFeatureExtractor"""),
("""deit""", """DeiTFeatureExtractor"""),
("""detr""", """DetrFeatureExtractor"""),
("""dinat""", """ViTFeatureExtractor"""),
("""donut-swin""", """DonutFeatureExtractor"""),
("""dpt""", """DPTFeatureExtractor"""),
("""encodec""", """EncodecFeatureExtractor"""),
("""flava""", """FlavaFeatureExtractor"""),
("""glpn""", """GLPNFeatureExtractor"""),
("""groupvit""", """CLIPFeatureExtractor"""),
("""hubert""", """Wav2Vec2FeatureExtractor"""),
("""imagegpt""", """ImageGPTFeatureExtractor"""),
("""layoutlmv2""", """LayoutLMv2FeatureExtractor"""),
("""layoutlmv3""", """LayoutLMv3FeatureExtractor"""),
("""levit""", """LevitFeatureExtractor"""),
("""maskformer""", """MaskFormerFeatureExtractor"""),
("""mctct""", """MCTCTFeatureExtractor"""),
("""mobilenet_v1""", """MobileNetV1FeatureExtractor"""),
("""mobilenet_v2""", """MobileNetV2FeatureExtractor"""),
("""mobilevit""", """MobileViTFeatureExtractor"""),
("""nat""", """ViTFeatureExtractor"""),
("""owlvit""", """OwlViTFeatureExtractor"""),
("""perceiver""", """PerceiverFeatureExtractor"""),
("""poolformer""", """PoolFormerFeatureExtractor"""),
("""regnet""", """ConvNextFeatureExtractor"""),
("""resnet""", """ConvNextFeatureExtractor"""),
("""segformer""", """SegformerFeatureExtractor"""),
("""sew""", """Wav2Vec2FeatureExtractor"""),
("""sew-d""", """Wav2Vec2FeatureExtractor"""),
("""speech_to_text""", """Speech2TextFeatureExtractor"""),
("""speecht5""", """SpeechT5FeatureExtractor"""),
("""swiftformer""", """ViTFeatureExtractor"""),
("""swin""", """ViTFeatureExtractor"""),
("""swinv2""", """ViTFeatureExtractor"""),
("""table-transformer""", """DetrFeatureExtractor"""),
("""timesformer""", """VideoMAEFeatureExtractor"""),
("""tvlt""", """TvltFeatureExtractor"""),
("""unispeech""", """Wav2Vec2FeatureExtractor"""),
("""unispeech-sat""", """Wav2Vec2FeatureExtractor"""),
("""van""", """ConvNextFeatureExtractor"""),
("""videomae""", """VideoMAEFeatureExtractor"""),
("""vilt""", """ViltFeatureExtractor"""),
("""vit""", """ViTFeatureExtractor"""),
("""vit_mae""", """ViTFeatureExtractor"""),
("""vit_msn""", """ViTFeatureExtractor"""),
("""wav2vec2""", """Wav2Vec2FeatureExtractor"""),
("""wav2vec2-conformer""", """Wav2Vec2FeatureExtractor"""),
("""wavlm""", """Wav2Vec2FeatureExtractor"""),
("""whisper""", """WhisperFeatureExtractor"""),
("""xclip""", """CLIPFeatureExtractor"""),
("""yolos""", """YolosFeatureExtractor"""),
]
)
A__ : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def a__ ( lowerCAmelCase : str ):
'''simple docstring'''
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
UpperCAmelCase__ : str = model_type_to_module_name(__UpperCamelCase )
UpperCAmelCase__ : List[str] = importlib.import_module(F".{module_name}" , "transformers.models" )
try:
return getattr(__UpperCamelCase , __UpperCamelCase )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(__UpperCamelCase , "__name__" , __UpperCamelCase ) == class_name:
return extractor
# We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
UpperCAmelCase__ : int = importlib.import_module("transformers" )
if hasattr(__UpperCamelCase , __UpperCamelCase ):
return getattr(__UpperCamelCase , __UpperCamelCase )
return None
def a__ ( lowerCAmelCase : List[str] , lowerCAmelCase : Tuple = None , lowerCAmelCase : List[Any] = False , lowerCAmelCase : Tuple = False , lowerCAmelCase : str = None , lowerCAmelCase : Tuple = None , lowerCAmelCase : Any = None , lowerCAmelCase : Optional[int] = False , **lowerCAmelCase : List[str] , ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = get_file_from_repo(
__UpperCamelCase , __UpperCamelCase , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , resume_download=__UpperCamelCase , proxies=__UpperCamelCase , use_auth_token=__UpperCamelCase , revision=__UpperCamelCase , local_files_only=__UpperCamelCase , )
if resolved_config_file is None:
logger.info(
"Could not locate the feature extractor configuration file, will try to use the model config instead." )
return {}
with open(__UpperCamelCase , encoding="utf-8" ) as reader:
return json.load(__UpperCamelCase )
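# A hedged usage sketch for the config loader above (the second `a__`; the repo
# id below is illustrative):
#
#     config_dict = a__("facebook/wav2vec2-base-960h")
#     extractor_type = config_dict.get("feature_extractor_type")  # may be absent, hence the {} fallback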
class _lowercase :
'''simple docstring'''
def __init__( self )-> Union[str, Any]:
raise EnvironmentError(
"AutoFeatureExtractor is designed to be instantiated "
"using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method." )
@classmethod
@replace_list_option_in_docstrings(_SCREAMING_SNAKE_CASE )
def lowerCAmelCase__ ( cls , __UpperCamelCase , **__UpperCamelCase )-> Tuple:
UpperCAmelCase__ : Optional[Any] = kwargs.pop("config" , _SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : List[Any] = kwargs.pop("trust_remote_code" , _SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : Any = True
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = FeatureExtractionMixin.get_feature_extractor_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : str = config_dict.get("feature_extractor_type" , _SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : Dict = None
if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ):
UpperCAmelCase__ : Tuple = config_dict["auto_map"]["AutoFeatureExtractor"]
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase__ : Tuple = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
# It could be in `config.feature_extractor_type`
UpperCAmelCase__ : Dict = getattr(_SCREAMING_SNAKE_CASE , "feature_extractor_type" , _SCREAMING_SNAKE_CASE )
if hasattr(_SCREAMING_SNAKE_CASE , "auto_map" ) and "AutoFeatureExtractor" in config.auto_map:
UpperCAmelCase__ : List[Any] = config.auto_map["AutoFeatureExtractor"]
if feature_extractor_class is not None:
UpperCAmelCase__ : Union[str, Any] = feature_extractor_class_from_name(_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : List[Any] = feature_extractor_auto_map is not None
UpperCAmelCase__ : Dict = feature_extractor_class is not None or type(_SCREAMING_SNAKE_CASE ) in FEATURE_EXTRACTOR_MAPPING
UpperCAmelCase__ : Any = resolve_trust_remote_code(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if has_remote_code and trust_remote_code:
UpperCAmelCase__ : Any = get_class_from_dynamic_module(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : Optional[int] = kwargs.pop("code_revision" , _SCREAMING_SNAKE_CASE )
if os.path.isdir(_SCREAMING_SNAKE_CASE ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(_SCREAMING_SNAKE_CASE ) in FEATURE_EXTRACTOR_MAPPING:
UpperCAmelCase__ : Union[str, Any] = FEATURE_EXTRACTOR_MAPPING[type(_SCREAMING_SNAKE_CASE )]
return feature_extractor_class.from_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
raise ValueError(
F"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
F"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
F"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}" )
@staticmethod
def lowerCAmelCase__ ( __UpperCamelCase , __UpperCamelCase )-> str:
FEATURE_EXTRACTOR_MAPPING.register(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
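# A hedged registration sketch for the hook above (the class mirrors
# AutoFeatureExtractor; MyConfig / MyFeatureExtractor are hypothetical stand-ins):
#
#     AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)
#     extractor = AutoFeatureExtractor.from_pretrained("repo/with-my-config")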
| 713 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
A__ : Optional[int] = ["""small""", """medium""", """large"""]
A__ : Optional[int] = """lm_head.decoder.weight"""
A__ : Dict = """lm_head.weight"""
def a__ ( lowerCAmelCase : str , lowerCAmelCase : str ):
'''simple docstring'''
UpperCAmelCase__ : Dict = torch.load(lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = d.pop(lowerCAmelCase )
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
torch.save(lowerCAmelCase , os.path.join(lowerCAmelCase , lowerCAmelCase ) )
if __name__ == "__main__":
A__ : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
A__ : Tuple = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
A__ : Tuple = os.path.join(args.dialogpt_path, f"""{MODEL}_ft.pkl""")
A__ : str = f"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
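# Hypothetical invocation (the fairseq pickles {small,medium,large}_ft.pkl are
# expected under --dialogpt_path; the script name is a placeholder):
#
#     python convert_dialogpt_checkpoint.py --dialogpt_path ./checkpoints
#
# Upstream intent: write ./DialoGPT-{small,medium,large}/pytorch_model.bin with
# the redundant lm_head.decoder.weight key renamed to lm_head.weight.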
| 660 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
A__ : Dict = logging.get_logger(__name__)
A__ : str = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def a__ ( lowerCAmelCase : Tuple , lowerCAmelCase : List[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple ):
'''simple docstring'''
for attribute in key.split("." ):
UpperCAmelCase__ : int = getattr(lowerCAmelCase , lowerCAmelCase )
if weight_type is not None:
UpperCAmelCase__ : Union[str, Any] = getattr(lowerCAmelCase , lowerCAmelCase ).shape
else:
UpperCAmelCase__ : Optional[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
UpperCAmelCase__ : Any = value
elif weight_type == "weight_g":
UpperCAmelCase__ : List[Any] = value
elif weight_type == "weight_v":
UpperCAmelCase__ : Any = value
elif weight_type == "bias":
UpperCAmelCase__ : Optional[Any] = value
else:
UpperCAmelCase__ : List[str] = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def a__ ( lowerCAmelCase : Any , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = []
UpperCAmelCase__ : Any = fairseq_model.state_dict()
UpperCAmelCase__ : List[str] = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase__ : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , hf_model.config.feat_extract_norm == "group" , )
UpperCAmelCase__ : List[str] = True
else:
for key, mapped_key in MAPPING.items():
UpperCAmelCase__ : str = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
UpperCAmelCase__ : Union[str, Any] = True
if "*" in mapped_key:
UpperCAmelCase__ : Optional[Any] = name.split(lowerCAmelCase )[0].split("." )[-2]
UpperCAmelCase__ : int = mapped_key.replace("*" , lowerCAmelCase )
if "weight_g" in name:
UpperCAmelCase__ : Optional[int] = """weight_g"""
elif "weight_v" in name:
UpperCAmelCase__ : List[Any] = """weight_v"""
elif "weight" in name:
UpperCAmelCase__ : List[str] = """weight"""
elif "bias" in name:
UpperCAmelCase__ : Optional[int] = """bias"""
else:
UpperCAmelCase__ : List[Any] = None
set_recursively(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
continue
if not is_used:
unused_weights.append(lowerCAmelCase )
logger.warning(F"Unused weights: {unused_weights}" )
def a__ ( lowerCAmelCase : Any , lowerCAmelCase : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : str ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = full_name.split("conv_layers." )[-1]
UpperCAmelCase__ : Optional[Any] = name.split("." )
UpperCAmelCase__ : int = int(items[0] )
UpperCAmelCase__ : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
UpperCAmelCase__ : List[Any] = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
UpperCAmelCase__ : Any = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
UpperCAmelCase__ : Any = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
UpperCAmelCase__ : Optional[int] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(lowerCAmelCase )
def a__ ( lowerCAmelCase : Any , lowerCAmelCase : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = SEWConfig()
if is_finetuned:
UpperCAmelCase__ : Union[str, Any] = model.wav_encoder.wav_model.cfg
else:
UpperCAmelCase__ : Tuple = model.cfg
UpperCAmelCase__ : List[str] = fs_config.conv_bias
UpperCAmelCase__ : List[Any] = eval(fs_config.conv_feature_layers )
UpperCAmelCase__ : Optional[Any] = [x[0] for x in conv_layers]
UpperCAmelCase__ : Dict = [x[1] for x in conv_layers]
UpperCAmelCase__ : List[str] = [x[2] for x in conv_layers]
UpperCAmelCase__ : str = """gelu"""
UpperCAmelCase__ : Tuple = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group"""
UpperCAmelCase__ : int = 0.0
UpperCAmelCase__ : Union[str, Any] = fs_config.activation_fn.name
UpperCAmelCase__ : str = fs_config.encoder_embed_dim
UpperCAmelCase__ : Union[str, Any] = 0.02
UpperCAmelCase__ : str = fs_config.encoder_ffn_embed_dim
UpperCAmelCase__ : List[str] = 1E-5
UpperCAmelCase__ : List[Any] = fs_config.encoder_layerdrop
UpperCAmelCase__ : Union[str, Any] = fs_config.encoder_attention_heads
UpperCAmelCase__ : Any = fs_config.conv_pos_groups
UpperCAmelCase__ : Optional[Any] = fs_config.conv_pos
UpperCAmelCase__ : List[str] = len(lowerCAmelCase )
UpperCAmelCase__ : int = fs_config.encoder_layers
UpperCAmelCase__ : List[Any] = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
UpperCAmelCase__ : int = model.cfg
UpperCAmelCase__ : Union[str, Any] = fs_config.final_dropout
UpperCAmelCase__ : Tuple = fs_config.layerdrop
UpperCAmelCase__ : Dict = fs_config.activation_dropout
UpperCAmelCase__ : Union[str, Any] = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
UpperCAmelCase__ : str = fs_config.attention_dropout
UpperCAmelCase__ : Any = fs_config.dropout_input
UpperCAmelCase__ : Dict = fs_config.dropout
UpperCAmelCase__ : str = fs_config.mask_channel_length
UpperCAmelCase__ : Any = fs_config.mask_channel_prob
UpperCAmelCase__ : List[Any] = fs_config.mask_length
UpperCAmelCase__ : Dict = fs_config.mask_prob
UpperCAmelCase__ : List[str] = """Wav2Vec2FeatureExtractor"""
UpperCAmelCase__ : Tuple = """Wav2Vec2CTCTokenizer"""
return config
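# A hedged conversion sketch (paths are hypothetical; upstream this helper is
# convert_config, obfuscated here as `a__`):
#
#     models, _cfg, _task = fairseq.checkpoint_utils.load_model_ensemble_and_task(["sew.pt"])
#     config = a__(models[0], False)  # False -> read cfg directly from the model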
@torch.no_grad()
def a__ ( lowerCAmelCase : Any , lowerCAmelCase : str , lowerCAmelCase : Tuple=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : Optional[Any]=True ):
'''simple docstring'''
if is_finetuned:
UpperCAmelCase__ : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
UpperCAmelCase__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
UpperCAmelCase__ : int = SEWConfig.from_pretrained(lowerCAmelCase )
else:
UpperCAmelCase__ : Dict = convert_config(model[0] , lowerCAmelCase )
UpperCAmelCase__ : Any = model[0].eval()
UpperCAmelCase__ : Optional[int] = True if config.feat_extract_norm == """layer""" else False
UpperCAmelCase__ : Optional[int] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=lowerCAmelCase , return_attention_mask=lowerCAmelCase , )
if is_finetuned:
if dict_path:
UpperCAmelCase__ : int = Dictionary.load(lowerCAmelCase )
# important: change the bos & pad token ids, since the CTC symbol is <pad>
# and not <s> as in fairseq
UpperCAmelCase__ : Optional[Any] = target_dict.pad_index
UpperCAmelCase__ : Any = target_dict.bos_index
UpperCAmelCase__ : List[str] = target_dict.pad_index
UpperCAmelCase__ : List[str] = target_dict.bos_index
UpperCAmelCase__ : str = target_dict.eos_index
UpperCAmelCase__ : Optional[Any] = len(target_dict.symbols )
UpperCAmelCase__ : int = os.path.join(lowerCAmelCase , "vocab.json" )
if not os.path.isdir(lowerCAmelCase ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowerCAmelCase ) )
return
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
with open(lowerCAmelCase , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(target_dict.indices , lowerCAmelCase )
UpperCAmelCase__ : Tuple = WavaVecaCTCTokenizer(
lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=lowerCAmelCase , )
UpperCAmelCase__ : Any = WavaVecaProcessor(feature_extractor=lowerCAmelCase , tokenizer=lowerCAmelCase )
processor.save_pretrained(lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = SEWForCTC(lowerCAmelCase )
else:
UpperCAmelCase__ : int = SEWModel(lowerCAmelCase )
feature_extractor.save_pretrained(lowerCAmelCase )
recursively_load_weights(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
hf_model.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
A__ : int = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
A__ : Optional[Any] = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
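# Hypothetical invocation of the conversion entry point above (the script and
# file names are placeholders):
#
#     python convert_sew_checkpoint.py \
#         --checkpoint_path sew.pt --pytorch_dump_folder_path ./sew-hf \
#         --dict_path dict.ltr.txt --is_finetuned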
| 714 |
"""simple docstring"""
from math import isqrt
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , lowerCAmelCase , i ):
UpperCAmelCase__ : List[Any] = False
return [i for i in range(2 , lowerCAmelCase ) if is_prime[i]]
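# For example, sieving up to 30 via the first `a__` yields
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29].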
def a__ ( lowerCAmelCase : int = 10**8 ):
'''simple docstring'''
UpperCAmelCase__ : Dict = calculate_prime_numbers(max_number // 2 )
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Tuple = len(lowerCAmelCase ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 660 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase=7 , __UpperCamelCase=3 , __UpperCamelCase=18 , __UpperCamelCase=30 , __UpperCamelCase=4_00 , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=[0.5, 0.5, 0.5] , )-> Union[str, Any]:
UpperCAmelCase__ : str = size if size is not None else {"height": 18, "width": 18}
UpperCAmelCase__ : Optional[Any] = parent
UpperCAmelCase__ : int = batch_size
UpperCAmelCase__ : Optional[Any] = num_channels
UpperCAmelCase__ : Dict = image_size
UpperCAmelCase__ : str = min_resolution
UpperCAmelCase__ : Optional[int] = max_resolution
UpperCAmelCase__ : List[Any] = do_resize
UpperCAmelCase__ : Dict = size
UpperCAmelCase__ : Optional[Any] = do_normalize
UpperCAmelCase__ : int = image_mean
UpperCAmelCase__ : int = image_std
def lowerCAmelCase__ ( self )-> Optional[int]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class _lowercase ( lowercase__ , unittest.TestCase ):
'''simple docstring'''
_A = DPTImageProcessor if is_vision_available() else None
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ : Any = DPTImageProcessingTester(self )
@property
def lowerCAmelCase__ ( self )-> List[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowercase , "image_mean" ) )
self.assertTrue(hasattr(__lowercase , "image_std" ) )
self.assertTrue(hasattr(__lowercase , "do_normalize" ) )
self.assertTrue(hasattr(__lowercase , "do_resize" ) )
self.assertTrue(hasattr(__lowercase , "size" ) )
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
UpperCAmelCase__ : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def lowerCAmelCase__ ( self )-> Optional[Any]:
# Initialize image_processing
UpperCAmelCase__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , Image.Image )
# Test not batched input
UpperCAmelCase__ : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
UpperCAmelCase__ : Union[str, Any] = image_processing(__lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def lowerCAmelCase__ ( self )-> Optional[int]:
# Initialize image_processing
UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , numpify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , np.ndarray )
# Test not batched input
UpperCAmelCase__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
UpperCAmelCase__ : Union[str, Any] = image_processing(__lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def lowerCAmelCase__ ( self )-> int:
# Initialize image_processing
UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , torchify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , torch.Tensor )
# Test not batched input
UpperCAmelCase__ : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
UpperCAmelCase__ : Union[str, Any] = image_processing(__lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
| 715 |
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def a__ ( lowerCAmelCase : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : List[Any] ):
'''simple docstring'''
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Optional[int] = np.full((len(lowerCAmelCase ), sequence_length, 2) , lowerCAmelCase )
else:
UpperCAmelCase__ : Optional[Any] = np.full((len(lowerCAmelCase ), sequence_length) , lowerCAmelCase )
for i, tensor in enumerate(lowerCAmelCase ):
if padding_side == "right":
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Dict = tensor[:sequence_length]
else:
UpperCAmelCase__ : Tuple = tensor[:sequence_length]
else:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Optional[Any] = tensor[:sequence_length]
else:
UpperCAmelCase__ : int = tensor[:sequence_length]
return out_tensor.tolist()
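# Hedged note: upstream this helper (padding_tensor) pads every item to
# `sequence_length` with `padding_index` on the tokenizer's padding side;
# intended behavior, e.g. right-padding [0, 5] to length 3 with -1 -> [0, 5, -1].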
def a__ ( lowerCAmelCase : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = ord(lowerCAmelCase )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
UpperCAmelCase__ : Optional[Any] = unicodedata.category(lowerCAmelCase )
if cat.startswith("P" ):
return True
return False
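# For example (the predicate above is the second `a__`):
#
#     a__("!")  # True  (ASCII punctuation range 33-47)
#     a__("a")  # False (category 'Ll', outside the punctuation ranges)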
@dataclass
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
_A = True
_A = None
_A = None
_A = -100
_A = "pt"
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[str]:
import torch
UpperCAmelCase__ : Optional[Any] = "label" if "label" in features[0].keys() else "labels"
UpperCAmelCase__ : Dict = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
UpperCAmelCase__ : str = self.tokenizer.pad(
__UpperCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , )
if labels is None:
return batch
UpperCAmelCase__ : Optional[Any] = torch.tensor(batch["entity_ids"] ).shape[1]
UpperCAmelCase__ : int = self.tokenizer.padding_side
if padding_side == "right":
UpperCAmelCase__ : int = [
list(__UpperCamelCase ) + [self.label_pad_token_id] * (sequence_length - len(__UpperCamelCase )) for label in labels
]
else:
UpperCAmelCase__ : List[Any] = [
[self.label_pad_token_id] * (sequence_length - len(__UpperCamelCase )) + list(__UpperCamelCase ) for label in labels
]
UpperCAmelCase__ : Optional[Any] = [feature["ner_tags"] for feature in features]
UpperCAmelCase__ : int = padding_tensor(__UpperCamelCase , -1 , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = [feature["original_entity_spans"] for feature in features]
UpperCAmelCase__ : int = padding_tensor(__UpperCamelCase , (-1, -1) , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Optional[int] = {k: torch.tensor(__UpperCamelCase , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 660 | 0 |
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent that you should run this script from the root of the repo with the command
# python utils/check_copies.py
A__ : Optional[Any] = """."""
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
A__ : int = [
"""Assert""",
"""AssignVariableOp""",
"""EmptyTensorList""",
"""MergeV2Checkpoints""",
"""ReadVariableOp""",
"""ResourceGather""",
"""RestoreV2""",
"""SaveV2""",
"""ShardedFilename""",
"""StatefulPartitionedCall""",
"""StaticRegexFullMatch""",
"""VarHandleOp""",
]
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : int = SavedModel()
UpperCAmelCase__ : Any = []
with open(os.path.join(_lowerCAmelCase , "utils" , "tf_ops" , "onnx.json" ) ) as f:
UpperCAmelCase__ : Optional[Any] = json.load(_lowerCAmelCase )["opsets"]
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(_lowerCAmelCase )] )
with open(_lowerCAmelCase , "rb" ) as f:
saved_model.ParseFromString(f.read() )
UpperCAmelCase__ : int = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
UpperCAmelCase__ : Union[str, Any] = sorted(_lowerCAmelCase )
UpperCAmelCase__ : Any = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(_lowerCAmelCase )
if strict and len(_lowerCAmelCase ) > 0:
raise Exception(F"Found the following incompatible ops for the opset {opset}:\n" + incompatible_ops )
elif len(_lowerCAmelCase ) > 0:
print(F"Found the following incompatible ops for the opset {opset}:" )
print(*_lowerCAmelCase , sep="\n" )
else:
print(F"The saved model {saved_model_path} can properly be converted with ONNX." )
if __name__ == "__main__":
A__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--saved_model_path""", help="""Path of the saved model to check (the .pb file).""")
parser.add_argument(
"""--opset""", default=12, type=int, help="""The ONNX opset against which the model has to be tested."""
)
parser.add_argument(
"""--framework""", choices=["""onnx"""], default="""onnx""", help="""Frameworks against which to test the saved model."""
)
parser.add_argument(
"""--strict""", action="""store_true""", help="""Whether make the checking strict (raise errors) or not (raise warnings)"""
)
A__ : str = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 716 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def a__ ( lowerCAmelCase : List[str] ):
'''simple docstring'''
def wrapper(*lowerCAmelCase : Any , **lowerCAmelCase : Tuple ):
UpperCAmelCase__ : Optional[int] = timeit.default_timer()
UpperCAmelCase__ : int = func(*lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase__ : List[Any] = timeit.default_timer() - starttime
return delta
UpperCAmelCase__ : int = func.__name__
return wrapper
def a__ ( lowerCAmelCase : dict , lowerCAmelCase : Optional[int]=100 , lowerCAmelCase : List[str]=None ):
'''simple docstring'''
UpperCAmelCase__ : str = []
UpperCAmelCase__ : Optional[Any] = seq_shapes or {}
for i in range(lowerCAmelCase ):
UpperCAmelCase__ : int = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(lowerCAmelCase , _ArrayXD ):
UpperCAmelCase__ : List[str] = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(lowerCAmelCase , datasets.Value ):
if v.dtype == "string":
UpperCAmelCase__ : Dict = "The small grey turtle was surprisingly fast when challenged."
else:
UpperCAmelCase__ : str = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(lowerCAmelCase , datasets.Sequence ):
while isinstance(lowerCAmelCase , datasets.Sequence ):
UpperCAmelCase__ : List[str] = v.feature
UpperCAmelCase__ : Optional[int] = seq_shapes[k]
UpperCAmelCase__ : Optional[int] = np.random.rand(*lowerCAmelCase ).astype(v.dtype )
UpperCAmelCase__ : Union[str, Any] = data
dummy_data.append((i, example) )
return dummy_data
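# A hedged sketch of the intended call (upstream name: generate_examples; the
# feature spec below is illustrative):
#
#     features = datasets.Features({"text": datasets.Value("string")})
#     dummy = a__(features, 10)  # -> list of (index, example_dict) pairs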
def a__ ( lowerCAmelCase : List[str] , lowerCAmelCase : Tuple , lowerCAmelCase : List[str]=100 , lowerCAmelCase : Optional[int]=None ):
'''simple docstring'''
UpperCAmelCase__ : int = generate_examples(lowerCAmelCase , num_examples=lowerCAmelCase , seq_shapes=lowerCAmelCase )
with ArrowWriter(features=lowerCAmelCase , path=lowerCAmelCase ) as writer:
for key, record in dummy_data:
UpperCAmelCase__ : List[Any] = features.encode_example(lowerCAmelCase )
writer.write(lowerCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = writer.finalize()
if num_final_examples != num_examples:
raise ValueError(
F"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
UpperCAmelCase__ : Optional[int] = datasets.Dataset.from_file(filename=lowerCAmelCase , info=datasets.DatasetInfo(features=lowerCAmelCase ) )
return dataset
| 660 | 0 |
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import List, Optional
class _lowercase ( UpperCamelCase__ ):
'''simple docstring'''
def __init__( self )-> List[str]:
# test for the above condition
self.test()
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : List[Any] = 0
UpperCAmelCase__ : List[str] = False
while not completed:
if counter == 1:
self.reset()
UpperCAmelCase__ : Union[str, Any] = self.advance()
if not self.does_advance(__UpperCamelCase ):
raise Exception(
"Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true." )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = self.update(__UpperCamelCase )
counter += 1
if counter > 1_00_00:
raise Exception("update() does not fulfill the constraint." )
if self.remaining() != 0:
raise Exception("Custom Constraint is not defined correctly." )
@abstractmethod
def lowerCAmelCase__ ( self )-> Dict:
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Optional[int]:
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Any:
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def lowerCAmelCase__ ( self )-> int:
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def lowerCAmelCase__ ( self )-> int:
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def lowerCAmelCase__ ( self , __UpperCamelCase=False )-> str:
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class _lowercase ( UpperCamelCase__ ):
'''simple docstring'''
def __init__( self , __UpperCamelCase )-> Dict:
super(__UpperCamelCase , self ).__init__()
if not isinstance(__UpperCamelCase , __UpperCamelCase ) or len(__UpperCamelCase ) == 0:
raise ValueError(F"`token_ids` has to be a non-empty list, but is {token_ids}." )
if any((not isinstance(__UpperCamelCase , __UpperCamelCase ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}." )
UpperCAmelCase__ : List[Any] = token_ids
UpperCAmelCase__ : str = len(self.token_ids )
UpperCAmelCase__ : str = -1 # the index of the currently fulfilled step
UpperCAmelCase__ : Optional[int] = False
def lowerCAmelCase__ ( self )-> Optional[int]:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def lowerCAmelCase__ ( self , __UpperCamelCase )-> str:
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise ValueError(F"`token_id` has to be an `int`, but is {token_id} of type {type(__UpperCamelCase )}" )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise ValueError(F"`token_id` has to be an `int`, but is {token_id} of type {type(__UpperCamelCase )}" )
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : str = False
if self.does_advance(__UpperCamelCase ):
self.fulfilled_idx += 1
UpperCAmelCase__ : Optional[int] = True
if self.fulfilled_idx == (self.seqlen - 1):
UpperCAmelCase__ : int = True
UpperCAmelCase__ : Any = completed
else:
# failed to make progress.
UpperCAmelCase__ : int = True
self.reset()
return stepped, completed, reset
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : str = 0
def lowerCAmelCase__ ( self )-> Optional[int]:
return self.seqlen - (self.fulfilled_idx + 1)
def lowerCAmelCase__ ( self , __UpperCamelCase=False )-> Optional[Any]:
UpperCAmelCase__ : List[Any] = PhrasalConstraint(self.token_ids )
if stateful:
UpperCAmelCase__ : List[Any] = self.seqlen
UpperCAmelCase__ : str = self.fulfilled_idx
UpperCAmelCase__ : List[str] = self.completed
return new_constraint
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase=True )-> Optional[int]:
UpperCAmelCase__ : Any = max([len(__UpperCamelCase ) for one in nested_token_ids] )
UpperCAmelCase__ : int = {}
for token_ids in nested_token_ids:
UpperCAmelCase__ : List[Any] = root
for tidx, token_id in enumerate(__UpperCamelCase ):
if token_id not in level:
UpperCAmelCase__ : Tuple = {}
UpperCAmelCase__ : Tuple = level[token_id]
if no_subsets and self.has_subsets(__UpperCamelCase , __UpperCamelCase ):
raise ValueError(
"Each list in `nested_token_ids` can't be a complete subset of another list, but is"
F" {nested_token_ids}." )
UpperCAmelCase__ : Dict = root
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
UpperCAmelCase__ : Tuple = self.trie
for current_token in current_seq:
UpperCAmelCase__ : Optional[int] = start[current_token]
UpperCAmelCase__ : str = list(start.keys() )
return next_tokens
def lowerCAmelCase__ ( self , __UpperCamelCase )-> str:
UpperCAmelCase__ : List[str] = self.next_tokens(__UpperCamelCase )
return len(__UpperCamelCase ) == 0
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Optional[int]:
UpperCAmelCase__ : Optional[int] = list(root.values() )
if len(__UpperCamelCase ) == 0:
return 1
else:
return sum([self.count_leaves(__UpperCamelCase ) for nn in next_nodes] )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> Optional[Any]:
UpperCAmelCase__ : List[Any] = self.count_leaves(__UpperCamelCase )
return len(__UpperCamelCase ) != leaf_count
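# A minimal trie sketch: for nested ids [[1, 2, 3], [1, 2, 4]] the prefix
# [1, 2] is shared, next_tokens([1, 2]) returns [3, 4], and the trie has
# count_leaves(root) == 2, so neither sequence is a subset of the other.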
class _lowercase ( UpperCamelCase__ ):
'''simple docstring'''
def __init__( self , __UpperCamelCase )-> List[Any]:
super(__UpperCamelCase , self ).__init__()
if not isinstance(__UpperCamelCase , __UpperCamelCase ) or len(__UpperCamelCase ) == 0:
raise ValueError(F"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}." )
if any(not isinstance(__UpperCamelCase , __UpperCamelCase ) for token_ids in nested_token_ids ):
raise ValueError(F"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}." )
if any(
any((not isinstance(__UpperCamelCase , __UpperCamelCase ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}." )
UpperCAmelCase__ : int = DisjunctiveTrie(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = nested_token_ids
UpperCAmelCase__ : Dict = self.trie.max_height
UpperCAmelCase__ : Optional[Any] = []
UpperCAmelCase__ : int = False
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : Optional[Any] = self.trie.next_tokens(self.current_seq )
if len(__UpperCamelCase ) == 0:
return None
else:
return token_list
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[str]:
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise ValueError(F"`token_id` is supposed to be type `int`, but is {token_id} of type {type(__UpperCamelCase )}" )
UpperCAmelCase__ : List[str] = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise ValueError(F"`token_id` is supposed to be type `int`, but is {token_id} of type {type(__UpperCamelCase )}" )
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : List[Any] = False
if self.does_advance(__UpperCamelCase ):
self.current_seq.append(__UpperCamelCase )
UpperCAmelCase__ : Tuple = True
else:
UpperCAmelCase__ : Optional[Any] = True
self.reset()
UpperCAmelCase__ : str = self.trie.reached_leaf(self.current_seq )
UpperCAmelCase__ : Tuple = completed
return stepped, completed, reset
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Any = []
def lowerCAmelCase__ ( self )-> int:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def lowerCAmelCase__ ( self , __UpperCamelCase=False )-> Union[str, Any]:
UpperCAmelCase__ : str = DisjunctiveConstraint(self.token_ids )
if stateful:
UpperCAmelCase__ : Union[str, Any] = self.seqlen
UpperCAmelCase__ : Tuple = self.current_seq
UpperCAmelCase__ : List[Any] = self.completed
return new_constraint
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : Union[str, Any] = constraints
# max # of steps required to fulfill a given constraint
UpperCAmelCase__ : List[Any] = max([c.seqlen for c in constraints] )
UpperCAmelCase__ : Optional[int] = len(__UpperCamelCase )
UpperCAmelCase__ : Tuple = False
self.init_state()
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : Dict = []
UpperCAmelCase__ : Tuple = None
UpperCAmelCase__ : Union[str, Any] = [constraint.copy(stateful=__UpperCamelCase ) for constraint in self.constraints]
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ : Union[str, Any] = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : List[Any] = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
UpperCAmelCase__ : Tuple = constraint.advance()
if isinstance(__UpperCamelCase , __UpperCamelCase ):
token_list.append(__UpperCamelCase )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
token_list.extend(__UpperCamelCase )
else:
UpperCAmelCase__ : Union[str, Any] = self.inprogress_constraint.advance()
if isinstance(__UpperCamelCase , __UpperCamelCase ):
token_list.append(__UpperCamelCase )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
token_list.extend(__UpperCamelCase )
if len(__UpperCamelCase ) == 0:
return None
else:
return token_list
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.add(__UpperCamelCase )
# the entire list of constraints is fulfilled
if self.completed:
break
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Dict:
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise ValueError(F"`token_id` should be an `int`, but is `{token_id}`." )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = False, False
if self.completed:
UpperCAmelCase__ : str = True
UpperCAmelCase__ : str = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the current
# job, simply update the state
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = self.inprogress_constraint.update(__UpperCamelCase )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=__UpperCamelCase ) )
UpperCAmelCase__ : List[str] = None
if complete:
# 2. If the next token completes the constraint, move it to the completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
UpperCAmelCase__ : Tuple = None
if len(self.pending_constraints ) == 0:
# we're done!
UpperCAmelCase__ : Optional[int] = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(__UpperCamelCase ):
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = pending_constraint.update(__UpperCamelCase )
if not stepped:
raise Exception(
"`constraint.update(token_id)` is not yielding incremental progress, "
"even though `constraint.does_advance(token_id)` is true." )
if complete:
self.complete_constraints.append(__UpperCamelCase )
UpperCAmelCase__ : str = None
if not complete and stepped:
UpperCAmelCase__ : Tuple = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
UpperCAmelCase__ : str = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
UpperCAmelCase__ : List[str] = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def lowerCAmelCase__ ( self , __UpperCamelCase=True )-> str:
UpperCAmelCase__ : str = ConstraintListState(self.constraints ) # we never mutate the self.constraints objects
# throughout this process, so the copy starts from their initialization state.
if stateful:
UpperCAmelCase__ : Dict = [
constraint.copy(stateful=__UpperCamelCase ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
UpperCAmelCase__ : List[str] = self.inprogress_constraint.copy(stateful=__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = [constraint.copy() for constraint in self.pending_constraints]
return new_state
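# A hedged end-to-end sketch of the upstream API (token ids are arbitrary):
#
#     state = ConstraintListState([PhrasalConstraint([5, 6, 7])])
#     state.add(5)                 # starts fulfilling the phrasal constraint
#     state.add(6); state.add(7)   # completes it; state.completed is now True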
| 717 |
"""simple docstring"""
from manim import *
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : str = Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase__ : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Union[str, Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : List[Any] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : int = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = VGroup(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("CPU" , font_size=24 )
UpperCAmelCase__ : Any = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(4 )]
UpperCAmelCase__ : List[str] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Union[str, Any] = Text("GPU" , font_size=24 )
UpperCAmelCase__ : Dict = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : List[str] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("Model" , font_size=24 )
UpperCAmelCase__ : Dict = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : List[str] = []
for i, rect in enumerate(__UpperCamelCase ):
rect.set_stroke(__UpperCamelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
UpperCAmelCase__ : int = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCamelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCamelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__UpperCamelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__UpperCamelCase , buff=0.0 )
self.add(__UpperCamelCase )
cpu_targs.append(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Any = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("Loaded Checkpoint" , font_size=24 )
UpperCAmelCase__ : Any = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , aligned_edge=__UpperCamelCase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
UpperCAmelCase__ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase__ : Any = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : str = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(__UpperCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
UpperCAmelCase__ : Optional[Any] = MarkupText(
F"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCamelCase ) , Write(__UpperCamelCase ) )
self.play(Write(__UpperCamelCase , run_time=1 ) , Create(__UpperCamelCase , run_time=1 ) )
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : List[str] = []
for i, rect in enumerate(__UpperCamelCase ):
UpperCAmelCase__ : Optional[Any] = fill.copy().set_fill(__UpperCamelCase , opacity=0.7 )
target.move_to(__UpperCamelCase )
first_animations.append(GrowFromCenter(__UpperCamelCase , run_time=1 ) )
UpperCAmelCase__ : List[str] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__UpperCamelCase , run_time=1.5 ) )
self.play(*__UpperCamelCase )
self.play(*__UpperCamelCase )
self.wait()
| 660 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
A__ : List[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
A__ : Tuple = 256_047
A__ : Dict = 256_145
@require_sentencepiece
@require_tokenizers
class _lowercase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
_A = NllbTokenizer
_A = NllbTokenizerFast
_A = True
_A = True
_A = {}
def lowerCAmelCase__ ( self )-> Optional[int]:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase__ : Union[str, Any] = NllbTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ : Dict = NllbTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ )
UpperCAmelCase__ : Tuple = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
UpperCAmelCase__ : str = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCamelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase__ : Any = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase__ : Optional[Any] = tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : Dict = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase__ : Any = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
UpperCAmelCase__ : Any = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
UpperCAmelCase__ : int = tempfile.mkdtemp()
UpperCAmelCase__ : Tuple = tokenizer_r.save_pretrained(lowerCamelCase__ )
UpperCAmelCase__ : Any = tokenizer_p.save_pretrained(lowerCamelCase__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
UpperCAmelCase__ : List[Any] = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(lowerCamelCase__ , lowerCamelCase__ )
# Checks everything loads correctly in the same way
UpperCAmelCase__ : Tuple = tokenizer_r.from_pretrained(lowerCamelCase__ )
UpperCAmelCase__ : Optional[Any] = tokenizer_p.from_pretrained(lowerCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase__ , lowerCamelCase__ ) )
shutil.rmtree(lowerCamelCase__ )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase__ : Optional[Any] = tempfile.mkdtemp()
UpperCAmelCase__ : List[str] = tokenizer_r.save_pretrained(lowerCamelCase__ , legacy_format=lowerCamelCase__ )
UpperCAmelCase__ : List[str] = tokenizer_p.save_pretrained(lowerCamelCase__ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCamelCase__ , lowerCamelCase__ )
# Checks everything loads correctly in the same way
UpperCAmelCase__ : Union[str, Any] = tokenizer_r.from_pretrained(lowerCamelCase__ )
UpperCAmelCase__ : Optional[Any] = tokenizer_p.from_pretrained(lowerCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase__ , lowerCamelCase__ ) )
shutil.rmtree(lowerCamelCase__ )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase__ : Optional[int] = tempfile.mkdtemp()
UpperCAmelCase__ : Dict = tokenizer_r.save_pretrained(lowerCamelCase__ , legacy_format=lowerCamelCase__ )
UpperCAmelCase__ : List[Any] = tokenizer_p.save_pretrained(lowerCamelCase__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase__ : int = tokenizer_r.from_pretrained(lowerCamelCase__ )
UpperCAmelCase__ : int = tokenizer_p.from_pretrained(lowerCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase__ , lowerCamelCase__ ) )
shutil.rmtree(lowerCamelCase__ )
@require_torch
def lowerCAmelCase__ ( self )-> List[str]:
if not self.test_seqaseq:
return
UpperCAmelCase__ : int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# Longer text that will definitely require truncation.
UpperCAmelCase__ : Tuple = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
" Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
" will only worsen the violence and misery for millions of people.",
]
UpperCAmelCase__ : Dict = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
" Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"
" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
try:
UpperCAmelCase__ : str = tokenizer.prepare_seqaseq_batch(
src_texts=lowerCamelCase__ , tgt_texts=lowerCamelCase__ , max_length=3 , max_target_length=10 , return_tensors="pt" , src_lang="eng_Latn" , tgt_lang="ron_Latn" , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
UpperCAmelCase__ : Union[str, Any] = tokenizer.prepare_seqaseq_batch(
lowerCamelCase__ , tgt_texts=lowerCamelCase__ , max_length=3 , return_tensors="pt" )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
UpperCAmelCase__ : Union[str, Any] = tokenizer.prepare_seqaseq_batch(
src_texts=lowerCamelCase__ , max_length=3 , max_target_length=10 , return_tensors="pt" )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn("decoder_input_ids" , lowerCamelCase__ )
@unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece." )
def lowerCAmelCase__ ( self )-> Tuple:
pass
def lowerCAmelCase__ ( self )-> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase__ : str = [AddedToken("<special>" , lstrip=lowerCamelCase__ )]
UpperCAmelCase__ : int = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , additional_special_tokens=lowerCamelCase__ , **lowerCamelCase__ )
UpperCAmelCase__ : str = tokenizer_r.encode("Hey this is a <special> token" )
UpperCAmelCase__ : str = tokenizer_r.encode("<special>" , add_special_tokens=lowerCamelCase__ )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
UpperCAmelCase__ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , additional_special_tokens=lowerCamelCase__ , **lowerCamelCase__ , )
UpperCAmelCase__ : Optional[int] = self.tokenizer_class.from_pretrained(
lowerCamelCase__ , additional_special_tokens=lowerCamelCase__ , **lowerCamelCase__ )
UpperCAmelCase__ : Tuple = tokenizer_p.encode("Hey this is a <special> token" )
UpperCAmelCase__ : Dict = tokenizer_cr.encode("Hey this is a <special> token" )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
_A = "facebook/nllb-200-distilled-600M"
_A = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
_A = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
_A = [
25_6047,
1_6297,
13_4408,
8165,
24_8066,
1_4734,
950,
1135,
10_5721,
3573,
83,
2_7352,
108,
4_9486,
2,
]
@classmethod
def lowerCAmelCase__ ( cls )-> Union[str, Any]:
UpperCAmelCase__ : NllbTokenizer = NllbTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="eng_Latn" , tgt_lang="ron_Latn" )
UpperCAmelCase__ : str = 1
return cls
def lowerCAmelCase__ ( self )-> Optional[Any]:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"] , 25_60_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"] , 25_60_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"] , 25_60_57 )
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ : Optional[int] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase__ )
def lowerCAmelCase__ ( self )-> List[str]:
self.assertIn(lowerCamelCase__ , self.tokenizer.all_special_ids )
# fmt: off
UpperCAmelCase__ : List[Any] = [RO_CODE, 42_54, 9_80_68, 11_29_23, 3_90_72, 39_09, 7_13, 10_27_67, 26, 1_73_14, 3_56_42, 1_46_83, 3_31_18, 20_22, 6_69_87, 2, 25_60_47]
# fmt: on
UpperCAmelCase__ : Optional[Any] = self.tokenizer.decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
UpperCAmelCase__ : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertNotIn(self.tokenizer.eos_token , lowerCamelCase__ )
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : Optional[int] = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0] , lowerCamelCase__ )
UpperCAmelCase__ : Tuple = 10
UpperCAmelCase__ : Union[str, Any] = self.tokenizer(lowerCamelCase__ , max_length=lowerCamelCase__ , truncation=lowerCamelCase__ ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , lowerCamelCase__ )
self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ )
def lowerCAmelCase__ ( self )-> Dict:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [25_62_03, 3] )
def lowerCAmelCase__ ( self )-> int:
UpperCAmelCase__ : Optional[int] = tempfile.mkdtemp()
UpperCAmelCase__ : Any = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCamelCase__ )
UpperCAmelCase__ : int = NllbTokenizer.from_pretrained(lowerCamelCase__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCamelCase__ )
@require_torch
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : List[Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
UpperCAmelCase__ : Optional[int] = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["ron_Latn"] )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
UpperCAmelCase__ : str = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ : Optional[Any] = self.tokenizer(self.src_text , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=3 , return_tensors="pt" )
UpperCAmelCase__ : List[Any] = self.tokenizer(
text_target=self.tgt_text , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=10 , return_tensors="pt" )
UpperCAmelCase__ : List[Any] = targets["input_ids"]
UpperCAmelCase__ : Tuple = shift_tokens_right(
lowerCamelCase__ , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : Optional[Any] = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
self.assertEqual(
nested_simplify(lowerCamelCase__ ) , {
# eng_Latn, A, test, EOS
"input_ids": [[25_60_47, 70, 73_56, 2]],
"attention_mask": [[1, 1, 1, 1]],
# fra_Latn
"forced_bos_token_id": 25_60_57,
} , )
@require_torch
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : Optional[Any] = self.tokenizer(
"UN Chief says there is no military solution in Syria" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
self.assertEqual(
inputs.input_ids , [1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2, 25_60_47] )
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : Dict = self.tokenizer(
"UN Chief says there is no military solution in Syria" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
self.assertEqual(
inputs.input_ids , [25_60_47, 1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2] )
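# Illustrative sketch (not part of the original test suite): the legacy flag
# exercised above only changes where the language code sits relative to EOS.
# `_order_special_tokens` is a hypothetical helper that mimics that rule with
# plain lists; the real NllbTokenizer handles this internally.
def _order_special_tokens(token_ids, lang_code_id, eos_id, legacy_behaviour):
    if legacy_behaviour:
        # tokens ... </s> <lang_code>: language code appended after EOS
        return token_ids + [eos_id, lang_code_id]
    # <lang_code> tokens ... </s>: language code prefixed
    return [lang_code_id] + token_ids + [eos_id]


assert _order_special_tokens([70, 73_56], 25_60_47, 2, legacy_behaviour=True) == [70, 73_56, 2, 25_60_47]
assert _order_special_tokens([70, 73_56], 25_60_47, 2, legacy_behaviour=False) == [25_60_47, 70, 73_56, 2]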
| 718 |
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(lowerCAmelCase ) == len(lowerCAmelCase ), F"{len(lowerCAmelCase )} != {len(lowerCAmelCase )}"
dest_layers.load_state_dict(layers_to_copy.state_dict() )
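# Minimal runnable sketch of the helper above (illustrative only): load the
# weights of teacher layers [0, 2, 5] into a 3-layer student via
# load_state_dict, then confirm they match.
def _demo_copy_layers() -> None:
    import torch

    teacher_layers = nn.ModuleList([nn.Linear(4, 4) for _ in range(6)])
    student_layers = nn.ModuleList([nn.Linear(4, 4) for _ in range(3)])
    copy_layers(teacher_layers, student_layers, [0, 2, 5])
    for student_layer, i in zip(student_layers, [0, 2, 5]):
        assert torch.equal(student_layer.weight, teacher_layers[i].weight)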
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student: int, n_teacher: int) -> List[int]:
    """Look up which teacher layers to copy; default to the first n_student."""
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
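# Hedged alternative sketch: evenly spaced indices that keep the first and
# last teacher layer are a common heuristic when no hardcoded mapping exists.
# Note the actual fallback above simply takes the first n_student layers.
def _evenly_spaced_layers(n_teacher: int, n_student: int) -> List[int]:
    if n_student <= 1:
        return [0]
    step = (n_teacher - 1) / (n_student - 1)
    return sorted({round(i * step) for i in range(n_student)})


assert _evenly_spaced_layers(12, 4) == [0, 4, 7, 11]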
def get_layers_to_supervise(n_student: int, n_teacher: int) -> List[int]:
    """Pick which teacher layers supervise each student layer."""
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
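# Worked example for the supervision map above (values taken from
# LAYERS_TO_SUPERVISE): a 12-layer teacher paired with a 3-layer student
# supervises with teacher layers [3, 7, 11] -- roughly one per third of the
# network, always including the last layer.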
def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
):
    """Make a student model by copying alternating layers from a teacher."""
    assert (e is not None) or (d is not None), (
        "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    )
if isinstance(lowerCAmelCase , lowerCAmelCase ):
AutoTokenizer.from_pretrained(lowerCAmelCase ).save_pretrained(lowerCAmelCase ) # purely for convenience
UpperCAmelCase__ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase ).eval()
else:
assert isinstance(lowerCAmelCase , lowerCAmelCase ), F"teacher must be a model or string got type {type(lowerCAmelCase )}"
UpperCAmelCase__ : int = teacher.config.to_diff_dict()
try:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
UpperCAmelCase__ : Tuple = teacher_e
if d is None:
UpperCAmelCase__ : str = teacher_d
init_kwargs.update({"encoder_layers": e, "decoder_layers": d} )
except AttributeError: # T5
if hasattr(teacher.config , "num_encoder_layers" ):
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
UpperCAmelCase__ : Optional[Any] = teacher_e
if d is None:
UpperCAmelCase__ : Optional[Any] = teacher_d
if hasattr(teacher.config , "num_encoder_layers" ):
init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d} )
else:
init_kwargs.update({"num_layers": e, "num_decoder_layers": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(lowerCAmelCase )
# Copy weights
UpperCAmelCase__ : Tuple = teacher.config_class(**lowerCAmelCase )
UpperCAmelCase__ : List[str] = AutoModelForSeqaSeqLM.from_config(lowerCAmelCase )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
UpperCAmelCase__ : Optional[int] = student.load_state_dict(teacher.state_dict() , strict=lowerCAmelCase )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
UpperCAmelCase__ , UpperCAmelCase__ : int = list(range(lowerCAmelCase ) ), list(range(lowerCAmelCase ) )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
F" {save_path}" )
student.save_pretrained(lowerCAmelCase )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
UpperCAmelCase__ : List[int] = pick_layers_to_copy(lowerCAmelCase , lowerCAmelCase )
if d_layers_to_copy is None:
UpperCAmelCase__ : List[int] = pick_layers_to_copy(lowerCAmelCase , lowerCAmelCase )
try:
if hasattr(
lowerCAmelCase , "prophetnet" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , lowerCAmelCase )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , lowerCAmelCase )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , lowerCAmelCase )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , lowerCAmelCase )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , lowerCAmelCase )
copy_layers(teacher.decoder.block , student.decoder.block , lowerCAmelCase )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}" )
UpperCAmelCase__ : int = {
"teacher_type": teacher.config.model_type,
"copied_encoder_layers": e_layers_to_copy,
"copied_decoder_layers": d_layers_to_copy,
}
student.save_pretrained(lowerCAmelCase )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
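# Example invocation (illustrative; python-fire maps keyword arguments to
# flags, so double-check against --help):
#   python this_script.py facebook/bart-large-cnn student_dir --e 6 --d 3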
| 660 | 0 |
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class _lowercase :
'''simple docstring'''
    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        """Return the circular convolution of the two stored signals."""
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(value, 2) for value in final_signal]
if __name__ == "__main__":
doctest.testmod()
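# Cross-check sketch (assumes numpy's FFT module): circular convolution equals
# ifft(fft(x) * fft(y)), which should match the rotated-matrix construction
# above up to rounding.
def _circular_convolution_fft(x, y):
    n = max(len(x), len(y))
    spectrum = np.fft.fft(x, n) * np.fft.fft(y, n)
    return [round(float(value), 2) for value in np.real(np.fft.ifft(spectrum))]


assert _circular_convolution_fft([2, 1, 2, -1], [1, 2, 3, 4]) == [10.0, 10.0, 6.0, 14.0]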
| 719 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowercase ( OnnxPipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self )-> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : Tuple = ort.SessionOptions()
UpperCAmelCase__ : List[str] = False
return options
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase__ : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase__ : str = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : int = "A red cat sitting on a park bench"
UpperCAmelCase__ : Tuple = np.random.RandomState(0 )
UpperCAmelCase__ : Any = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type="np" , )
UpperCAmelCase__ : Tuple = output.images
UpperCAmelCase__ : Dict = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase__ : Union[str, Any] = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase__ : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" )
UpperCAmelCase__ : Optional[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : int = "A red cat sitting on a park bench"
UpperCAmelCase__ : List[str] = np.random.RandomState(0 )
UpperCAmelCase__ : str = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCamelCase , output_type="np" , )
UpperCAmelCase__ : List[str] = output.images
UpperCAmelCase__ : List[Any] = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase__ : int = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
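# Small helper sketch for the tolerance pattern repeated in the tests above;
# purely illustrative, the tests inline the same expression.
def _max_abs_diff(image_slice: np.ndarray, expected_slice: np.ndarray) -> float:
    return float(np.abs(image_slice.flatten() - expected_slice).max())


assert _max_abs_diff(np.zeros((3, 3, 3)), np.zeros(27)) == 0.0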
| 660 | 0 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class _lowercase ( ProcessorMixin ):
_A = ['''image_processor''', '''tokenizer''']
_A = '''AutoImageProcessor'''
_A = '''AutoTokenizer'''
def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , **__UpperCamelCase )-> Tuple:
UpperCAmelCase__ : int = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __a , )
UpperCAmelCase__ : str = kwargs.pop("feature_extractor" )
UpperCAmelCase__ : List[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__a , __a )
UpperCAmelCase__ : Any = self.image_processor
UpperCAmelCase__ : Any = False
def __call__( self , *__UpperCamelCase , **__UpperCamelCase )-> Optional[Any]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__a , **__a )
UpperCAmelCase__ : List[Any] = kwargs.pop("images" , __a )
UpperCAmelCase__ : List[str] = kwargs.pop("text" , __a )
if len(__a ) > 0:
UpperCAmelCase__ : Any = args[0]
UpperCAmelCase__ : List[Any] = args[1:]
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
UpperCAmelCase__ : str = self.image_processor(__a , *__a , **__a )
if text is not None:
UpperCAmelCase__ : Union[str, Any] = self.tokenizer(__a , **__a )
if text is None:
return inputs
elif images is None:
return encodings
else:
inputs["labels"] = encodings["input_ids"]
return inputs
def lowerCAmelCase__ ( self , *__UpperCamelCase , **__UpperCamelCase )-> Union[str, Any]:
return self.tokenizer.batch_decode(*__a , **__a )
def lowerCAmelCase__ ( self , *__UpperCamelCase , **__UpperCamelCase )-> Union[str, Any]:
return self.tokenizer.decode(*__a , **__a )
@contextmanager
def lowerCAmelCase__ ( self )-> int:
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your images inputs, or in a separate call." )
UpperCAmelCase__ : List[str] = True
UpperCAmelCase__ : int = self.tokenizer
yield
UpperCAmelCase__ : str = self.image_processor
UpperCAmelCase__ : Union[str, Any] = False
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=None )-> Union[str, Any]:
if added_vocab is None:
UpperCAmelCase__ : Optional[Any] = self.tokenizer.get_added_vocab()
UpperCAmelCase__ : Dict = {}
while tokens:
UpperCAmelCase__ : Union[str, Any] = re.search(r"<s_(.*?)>" , __a , re.IGNORECASE )
if start_token is None:
break
UpperCAmelCase__ : List[Any] = start_token.group(1 )
UpperCAmelCase__ : Optional[int] = re.search(rF"</s_{key}>" , __a , re.IGNORECASE )
UpperCAmelCase__ : int = start_token.group()
if end_token is None:
UpperCAmelCase__ : Any = tokens.replace(__a , "" )
else:
UpperCAmelCase__ : Any = end_token.group()
UpperCAmelCase__ : Tuple = re.escape(__a )
UpperCAmelCase__ : Optional[Any] = re.escape(__a )
UpperCAmelCase__ : Dict = re.search(F"{start_token_escaped}(.*?){end_token_escaped}" , __a , re.IGNORECASE )
if content is not None:
UpperCAmelCase__ : List[str] = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
UpperCAmelCase__ : Any = self.tokenajson(__a , is_inner_value=__a , added_vocab=__a )
if value:
if len(__a ) == 1:
UpperCAmelCase__ : str = value[0]
UpperCAmelCase__ : List[Any] = value
else: # leaf nodes
UpperCAmelCase__ : List[Any] = []
for leaf in content.split(r"<sep/>" ):
UpperCAmelCase__ : Optional[Any] = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
UpperCAmelCase__ : Optional[Any] = leaf[1:-2] # for categorical special tokens
output[key].append(__a )
if len(output[key] ) == 1:
UpperCAmelCase__ : Optional[Any] = output[key][0]
UpperCAmelCase__ : Tuple = tokens[tokens.find(__a ) + len(__a ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=__a , added_vocab=__a )
if len(__a ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def lowerCAmelCase__ ( self )-> Union[str, Any]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __a , )
return self.image_processor_class
@property
def lowerCAmelCase__ ( self )-> Union[str, Any]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __a , )
return self.image_processor
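# Standalone sketch of the token-to-JSON idea implemented by the parsing
# method above: pull "<s_key>value</s_key>" pairs out of a flat token string
# with a regex. This hypothetical helper handles flat keys only -- no nesting
# and no <sep/> lists -- and exists purely to make the recursive parser's goal
# concrete.
def _flat_tokens_to_dict(tokens: str) -> dict:
    return {
        match.group(1): match.group(2).strip()
        for match in re.finditer(r"<s_(.*?)>(.*?)</s_\1>", tokens)
    }


assert _flat_tokens_to_dict("<s_name>Jane</s_name><s_total>42</s_total>") == {"name": "Jane", "total": "42"}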
| 720 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A__ : Union[str, Any] = logging.get_logger(__name__)
A__ : Optional[int] = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class _lowercase ( PretrainedConfig ):
'''simple docstring'''
_A = 'table-transformer'
_A = ['past_key_values']
_A = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=3 , __UpperCamelCase=1_00 , __UpperCamelCase=6 , __UpperCamelCase=20_48 , __UpperCamelCase=8 , __UpperCamelCase=6 , __UpperCamelCase=20_48 , __UpperCamelCase=8 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=True , __UpperCamelCase="relu" , __UpperCamelCase=2_56 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.02 , __UpperCamelCase=1.0 , __UpperCamelCase=False , __UpperCamelCase="sine" , __UpperCamelCase="resnet50" , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=0.1 , **__UpperCamelCase , )-> List[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCAmelCase__ : Any = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : int = backbone_config.get("model_type" )
UpperCAmelCase__ : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase__ : int = config_class.from_dict(__UpperCamelCase )
# set timm attributes to None
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = None, None, None
UpperCAmelCase__ : Optional[int] = use_timm_backbone
UpperCAmelCase__ : Dict = backbone_config
UpperCAmelCase__ : List[Any] = num_channels
UpperCAmelCase__ : Any = num_queries
UpperCAmelCase__ : int = d_model
UpperCAmelCase__ : Optional[int] = encoder_ffn_dim
UpperCAmelCase__ : str = encoder_layers
UpperCAmelCase__ : Dict = encoder_attention_heads
UpperCAmelCase__ : Optional[Any] = decoder_ffn_dim
UpperCAmelCase__ : Tuple = decoder_layers
UpperCAmelCase__ : Optional[Any] = decoder_attention_heads
UpperCAmelCase__ : List[str] = dropout
UpperCAmelCase__ : Tuple = attention_dropout
UpperCAmelCase__ : List[Any] = activation_dropout
UpperCAmelCase__ : Dict = activation_function
UpperCAmelCase__ : Optional[Any] = init_std
UpperCAmelCase__ : List[str] = init_xavier_std
UpperCAmelCase__ : int = encoder_layerdrop
UpperCAmelCase__ : Tuple = decoder_layerdrop
UpperCAmelCase__ : int = encoder_layers
UpperCAmelCase__ : Dict = auxiliary_loss
UpperCAmelCase__ : Union[str, Any] = position_embedding_type
UpperCAmelCase__ : List[str] = backbone
UpperCAmelCase__ : List[Any] = use_pretrained_backbone
UpperCAmelCase__ : List[str] = dilation
# Hungarian matcher
UpperCAmelCase__ : Dict = class_cost
UpperCAmelCase__ : Any = bbox_cost
UpperCAmelCase__ : Tuple = giou_cost
# Loss coefficients
UpperCAmelCase__ : Any = mask_loss_coefficient
UpperCAmelCase__ : Dict = dice_loss_coefficient
UpperCAmelCase__ : Any = bbox_loss_coefficient
UpperCAmelCase__ : Tuple = giou_loss_coefficient
UpperCAmelCase__ : List[Any] = eos_coefficient
super().__init__(is_encoder_decoder=__UpperCamelCase , **__UpperCamelCase )
@property
def lowerCAmelCase__ ( self )-> int:
return self.encoder_attention_heads
@property
def lowerCAmelCase__ ( self )-> int:
return self.d_model
class _lowercase ( OnnxConfig ):
'''simple docstring'''
_A = version.parse('1.11' )
@property
def lowerCAmelCase__ ( self )-> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def lowerCAmelCase__ ( self )-> float:
return 1E-5
@property
def lowerCAmelCase__ ( self )-> int:
return 12
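# Minimal sketch of the attribute_map aliasing declared on the config above:
# reads of "hidden_size" are redirected to "d_model". This toy class only
# illustrates the mechanism; the real lookup lives in PretrainedConfig.
class _AliasedConfig:
    attribute_map = {"hidden_size": "d_model"}

    def __init__(self, d_model: int = 2_56) -> None:
        self.d_model = d_model

    def __getattr__(self, name: str):
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)


assert _AliasedConfig(d_model=5_12).hidden_size == 5_12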
| 660 | 0 |
"""simple docstring"""
def dfs(u, graph, visited_edge, path=None):
    """Walk unused edges depth-first from u, recording the vertices visited."""
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path
def check_circuit_or_path(graph, max_node):
    """Return (1, node) for an Euler circuit, (2, node) for a path, (3, node) otherwise."""
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node
def check_euler(graph, max_node):
    """Print whether the graph has an Euler cycle or path, plus the path found."""
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)
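# Worked parity check (illustrative): an Euler circuit needs every vertex to
# have even degree, while an Euler path allows exactly two odd-degree
# vertices.
def _count_odd_degree_nodes(graph: dict) -> int:
    return sum(1 for neighbours in graph.values() if len(neighbours) % 2 == 1)


assert _count_odd_degree_nodes({1: [2, 3], 2: [1, 3], 3: [1, 2]}) == 0  # circuit
assert _count_odd_degree_nodes({1: [2], 2: [1, 3], 3: [2]}) == 2  # path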
def main():
    """Run the Euler check against a handful of sample graphs."""
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
if __name__ == "__main__":
main()
| 721 |
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fpaa=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
):
'''simple docstring'''
UpperCAmelCase__ : Dict = str(lowerCAmelCase )
assert local_rank is not None
torch.distributed.init_process_group(backend="nccl" , rank=lowerCAmelCase )
UpperCAmelCase__ : List[str] = Path(lowerCAmelCase )
UpperCAmelCase__ : str = save_dir.joinpath(F"rank_{local_rank}_output.json" )
torch.cuda.set_device(lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase ).cuda()
if fpaa:
UpperCAmelCase__ : List[Any] = model.half()
# determine if we need to increase num_beams
use_task_specific_params(lowerCAmelCase , lowerCAmelCase ) # update config with task specific params
UpperCAmelCase__ : List[Any] = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
UpperCAmelCase__ : Any = num_return_sequences
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase )
logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
if max_source_length is None:
UpperCAmelCase__ : int = tokenizer.model_max_length
if prefix is None:
UpperCAmelCase__ : Union[str, Any] = prefix or getattr(model.config , "prefix" , "" ) or ""
UpperCAmelCase__ : str = SeqaSeqDataset(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , max_target_length=1024 , type_path=lowerCAmelCase , n_obs=lowerCAmelCase , prefix=lowerCAmelCase , **lowerCAmelCase , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
UpperCAmelCase__ : Union[str, Any] = ds.make_sortish_sampler(lowerCAmelCase , distributed=lowerCAmelCase , add_extra_examples=lowerCAmelCase , shuffle=lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = DataLoader(lowerCAmelCase , sampler=lowerCAmelCase , batch_size=lowerCAmelCase , collate_fn=ds.collate_fn )
UpperCAmelCase__ : str = []
for batch in tqdm(lowerCAmelCase ):
UpperCAmelCase__ : Dict = model.generate(
input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=lowerCAmelCase , num_beams=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase__ : int = tokenizer.batch_decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase )
UpperCAmelCase__ : int = batch["ids"]
if num_return_sequences > 1:
UpperCAmelCase__ : str = chunks(lowerCAmelCase , lowerCAmelCase ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(lowerCAmelCase ):
results.append({"pred": pred, "id": ids[i].item()} )
save_json(lowerCAmelCase , lowerCAmelCase )
return results, sampler.num_replicas
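# Sketch of the regrouping used above (a hypothetical mirror of utils.chunks):
# when num_return_sequences > 1, the flat list of decoded strings is split so
# each source example keeps its own list of candidates.
def _chunks(items: List[str], chunk_size: int) -> List[List[str]]:
    return [items[i : i + chunk_size] for i in range(0, len(items), chunk_size)]


assert _chunks(["a", "b", "c", "d"], 2) == [["a", "b"], ["c", "d"]]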
def run_generate():
'''simple docstring'''
UpperCAmelCase__ : str = argparse.ArgumentParser(
epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" )
parser.add_argument("--data_dir" , type=lowerCAmelCase , help="like cnn_dm/test.source" )
parser.add_argument(
"--model_name" , type=lowerCAmelCase , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , )
parser.add_argument("--save_dir" , type=lowerCAmelCase , help="where to save" , default="tmp_gen" )
parser.add_argument("--max_source_length" , type=lowerCAmelCase , default=lowerCAmelCase )
parser.add_argument(
"--type_path" , type=lowerCAmelCase , default="test" , help="which subset to evaluate typically train/val/test" )
parser.add_argument("--task" , type=lowerCAmelCase , default="summarization" , help="used for task_specific_params + metrics" )
parser.add_argument("--bs" , type=lowerCAmelCase , default=8 , required=lowerCAmelCase , help="batch size" )
parser.add_argument(
"--local_rank" , type=lowerCAmelCase , default=-1 , required=lowerCAmelCase , help="should be passed by distributed.launch" )
parser.add_argument(
"--n_obs" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase , help="How many observations. Defaults to all." )
parser.add_argument(
"--num_return_sequences" , type=lowerCAmelCase , default=1 , required=lowerCAmelCase , help="How many sequences to return" )
parser.add_argument(
"--sync_timeout" , type=lowerCAmelCase , default=600 , required=lowerCAmelCase , help="How long should master process wait for other processes to finish." , )
parser.add_argument("--src_lang" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase )
parser.add_argument("--tgt_lang" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase )
parser.add_argument(
"--prefix" , type=lowerCAmelCase , required=lowerCAmelCase , default=lowerCAmelCase , help="will be added to the begininng of src examples" )
parser.add_argument("--fp16" , action="store_true" )
parser.add_argument("--debug" , action="store_true" )
UpperCAmelCase__ : Optional[int] = time.time()
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = parser.parse_known_args()
UpperCAmelCase__ : int = parse_numeric_n_bool_cl_kwargs(lowerCAmelCase )
if generate_kwargs and args.local_rank <= 0:
print(F"parsed the following generate kwargs: {generate_kwargs}" )
UpperCAmelCase__ : Dict = Path(args.save_dir + "_tmp" )
Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase ) # this handles locking.
UpperCAmelCase__ : List[str] = list(json_save_dir.glob("rank_*.json" ) )
if intermediate_files:
raise ValueError(F"Found files at {json_save_dir} please move or remove them." )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
UpperCAmelCase__ : List[str] = {}
if args.src_lang is not None:
UpperCAmelCase__ : str = args.src_lang
if args.tgt_lang is not None:
UpperCAmelCase__ : List[str] = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=lowerCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = eval_data_dir(
args.data_dir , lowerCAmelCase , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=lowerCAmelCase , **lowerCAmelCase , )
if args.local_rank <= 0:
UpperCAmelCase__ : str = Path(args.save_dir )
save_dir.mkdir(exist_ok=lowerCAmelCase )
UpperCAmelCase__ : Tuple = gather_results_from_each_node(lowerCAmelCase , lowerCAmelCase , args.sync_timeout )
UpperCAmelCase__ : Union[str, Any] = combine_partial_results(lowerCAmelCase )
if args.num_return_sequences > 1:
UpperCAmelCase__ : int = save_dir.joinpath("pseudolabel_results.json" )
print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" )
save_json(lowerCAmelCase , lowerCAmelCase )
return
UpperCAmelCase__ : Optional[Any] = Path(args.data_dir ).joinpath(args.type_path + ".target" )
with open(lowerCAmelCase ) as f:
UpperCAmelCase__ : Optional[int] = [x.rstrip() for x in f.readlines()][: len(lowerCAmelCase )]
# Calculate metrics, save metrics, and save _generations.txt
UpperCAmelCase__ : List[Any] = "translation" in args.task
UpperCAmelCase__ : Optional[Any] = calculate_bleu if calc_bleu else calculate_rouge
UpperCAmelCase__ : Optional[Any] = "bleu" if calc_bleu else "rouge"
UpperCAmelCase__ : Dict = score_fn(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : List[Any] = len(lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = time.time() - start_time
UpperCAmelCase__ : Optional[int] = round(runtime / metrics["n_obs"] , 4 )
UpperCAmelCase__ : Tuple = num_replicas
# TODO(@stas00): add whatever metadata to metrics
UpperCAmelCase__ : Any = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" )
save_json(lowerCAmelCase , lowerCAmelCase , indent=lowerCAmelCase )
print(lowerCAmelCase )
write_txt_file(lowerCAmelCase , save_dir.joinpath(F"{args.type_path}_generations.txt" ) )
if args.debug:
write_txt_file(lowerCAmelCase , save_dir.joinpath(F"{args.type_path}.target" ) )
else:
shutil.rmtree(lowerCAmelCase )
def combine_partial_results(partial_results) -> List:
    """Concatenate the per-rank records, sort them by id, and return the preds."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    """Wait for every rank to write rank_*.json, then load and return them."""
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
    # Unreachable
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
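# Illustrative launch command (not part of the original file): the script
# expects torch.distributed to provide --local_rank, e.g. via the legacy
# launcher:
#   python -m torch.distributed.launch --nproc_per_node=8 \
#       this_script.py --model_name sshleifer/distilbart-xsum-12-3 \
#       --data_dir cnn_dm --save_dir tmp_gen --bs 8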
| 660 | 0 |
"""simple docstring"""
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
A__ : int = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = _ask_options(
"In which compute environment are you running?" , ["This machine", "AWS (Amazon SageMaker)"] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
UpperCAmelCase__ : Tuple = get_sagemaker_input()
else:
UpperCAmelCase__ : str = get_cluster_input()
return config
def a__ ( lowerCAmelCase : List[str]=None ):
'''simple docstring'''
if subparsers is not None:
UpperCAmelCase__ : Dict = subparsers.add_parser("config" , description=lowerCAmelCase )
else:
UpperCAmelCase__ : Dict = argparse.ArgumentParser("Accelerate config command" , description=lowerCAmelCase )
parser.add_argument(
"--config_file" , default=lowerCAmelCase , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have "
"such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed "
"with \'huggingface\'."
) , )
if subparsers is not None:
parser.set_defaults(func=lowerCAmelCase )
return parser
def a__ ( lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = get_user_input()
if args.config_file is not None:
UpperCAmelCase__ : int = args.config_file
else:
if not os.path.isdir(lowerCAmelCase ):
os.makedirs(lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = default_yaml_config_file
if config_file.endswith(".json" ):
config.to_json_file(lowerCAmelCase )
else:
config.to_yaml_file(lowerCAmelCase )
print(F"accelerate configuration saved at {config_file}" )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = config_command_parser()
UpperCAmelCase__ : Tuple = parser.parse_args()
config_command(lowerCAmelCase )
if __name__ == "__main__":
main()
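# Usage note (illustrative): `accelerate config` runs the interactive prompts
# above and writes the answers to the default YAML path; the --config_file
# flag declared in config_command_parser redirects the output, e.g.:
#   accelerate config --config_file ./my_config.yaml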
| 700 |
"""simple docstring"""
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits by repeatedly clearing the lowest set bit.

    >>> get_set_bits_count_using_brian_kernighans_algorithm(25)
    3
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by testing the lowest bit and shifting right.

    >>> get_set_bits_count_using_modulo_operator(25)
    3
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
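# Equivalent one-liner sketch for comparison (illustrative): Python's built-in
# bin() yields the same counts as both implementations above.
def get_set_bits_count_using_builtin_bin(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    return bin(number).count("1")


assert get_set_bits_count_using_builtin_bin(25) == 3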
def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 660 | 0 |
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class _lowercase ( ProcessorMixin ):
'''simple docstring'''
_A = ['image_processor', 'tokenizer']
_A = 'OwlViTImageProcessor'
_A = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , **__UpperCamelCase )-> int:
UpperCAmelCase__ : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __UpperCamelCase , )
UpperCAmelCase__ : int = kwargs.pop("feature_extractor" )
UpperCAmelCase__ : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__UpperCamelCase , __UpperCamelCase )
def __call__( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase="max_length" , __UpperCamelCase="np" , **__UpperCamelCase )-> Union[str, Any]:
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(__UpperCamelCase , __UpperCamelCase ) or (isinstance(__UpperCamelCase , __UpperCamelCase ) and not isinstance(text[0] , __UpperCamelCase )):
UpperCAmelCase__ : str = [self.tokenizer(__UpperCamelCase , padding=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase )]
elif isinstance(__UpperCamelCase , __UpperCamelCase ) and isinstance(text[0] , __UpperCamelCase ):
UpperCAmelCase__ : Any = []
# Maximum number of queries across batch
UpperCAmelCase__ : List[Any] = max([len(__UpperCamelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__UpperCamelCase ) != max_num_queries:
UpperCAmelCase__ : Tuple = t + [" "] * (max_num_queries - len(__UpperCamelCase ))
UpperCAmelCase__ : Optional[int] = self.tokenizer(__UpperCamelCase , padding=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase )
encodings.append(__UpperCamelCase )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
UpperCAmelCase__ : str = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
UpperCAmelCase__ : Tuple = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
UpperCAmelCase__ : List[str] = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
UpperCAmelCase__ : Tuple = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
UpperCAmelCase__ : int = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
UpperCAmelCase__ : Optional[Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
UpperCAmelCase__ : Dict = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
UpperCAmelCase__ : Union[str, Any] = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
UpperCAmelCase__ : Union[str, Any] = BatchEncoding()
UpperCAmelCase__ : int = input_ids
UpperCAmelCase__ : Dict = attention_mask
if query_images is not None:
UpperCAmelCase__ : Optional[Any] = BatchEncoding()
UpperCAmelCase__ : Tuple = self.image_processor(
__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase ).pixel_values
UpperCAmelCase__ : List[Any] = query_pixel_values
if images is not None:
UpperCAmelCase__ : Tuple = self.image_processor(__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase )
if text is not None and images is not None:
UpperCAmelCase__ : str = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
UpperCAmelCase__ : Any = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__UpperCamelCase ) , tensor_type=__UpperCamelCase )
def lowerCAmelCase__ ( self , *__UpperCamelCase , **__UpperCamelCase )-> List[str]:
return self.image_processor.post_process(*__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , *__UpperCamelCase , **__UpperCamelCase )-> Optional[Any]:
return self.image_processor.post_process_object_detection(*__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , *__UpperCamelCase , **__UpperCamelCase )-> int:
return self.image_processor.post_process_image_guided_detection(*__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , *__UpperCamelCase , **__UpperCamelCase )-> Union[str, Any]:
return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , *__UpperCamelCase , **__UpperCamelCase )-> Optional[Any]:
return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase )
@property
def lowerCAmelCase__ ( self )-> str:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __UpperCamelCase , )
return self.image_processor_class
@property
def lowerCAmelCase__ ( self )-> Tuple:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __UpperCamelCase , )
return self.image_processor
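# A call sketch (illustrative, not taken from this file): this OWL-ViT-style
# processor pairs an image processor with a tokenizer, so a combined call might
# look like the following, where `processor` and `image` are assumed to exist:
#
#     inputs = processor(text=[["a photo of a cat", "a photo of a dog"]] , images=image , return_tensors="pt" )
#     # -> BatchEncoding carrying input_ids, attention_mask and pixel_values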
| 701 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _lowercase ( unittest.TestCase , lowerCAmelCase_ ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Optional[Any] = load_tool("text-classification" )
self.tool.setup()
UpperCAmelCase__ : List[str] = load_tool("text-classification" , remote=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : Dict = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : List[Any] = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ : Any = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : str = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
| 660 | 0 |
"""simple docstring"""
def a__ ( lowerCAmelCase : int ):
    '''simple docstring'''
    # memo[n][k] = number of partitions of n into parts no larger than k + 1
    memo : list = [[0 for _ in range(lowerCAmelCase )] for _ in range(lowerCAmelCase + 1 )]
    for i in range(lowerCAmelCase + 1 ):
        memo[i][0] = 1
    for n in range(lowerCAmelCase + 1 ):
        for k in range(1 , lowerCAmelCase ):
            # either no part of size k + 1 is used, or at least one such part is removed
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[lowerCAmelCase][lowerCAmelCase - 1]
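# Quick sanity check: a__(4 ) == 5, matching the five partitions of 4:
# 4, 3+1, 2+2, 2+1+1 and 1+1+1+1.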
if __name__ == "__main__":
import sys
    if len(sys.argv) == 1:
        try:
            A__ : List[Any] = int(input("""Enter a number: """).strip())
            print(a__(A__))
        except ValueError:
            print("""Please enter a number.""")
    else:
        try:
            A__ : Tuple = int(sys.argv[1])
            print(a__(A__))
        except ValueError:
            print("""Please pass a number.""")
| 702 |
"""simple docstring"""
def a__ ( lowerCAmelCase : list , lowerCAmelCase_ : list ):
    '''simple docstring'''
    _validate_point(lowerCAmelCase )
    _validate_point(lowerCAmelCase_ )
    if len(lowerCAmelCase ) != len(lowerCAmelCase_ ):
        raise ValueError("Both points must be in the same n-dimensional space" )
    return float(sum(abs(a - b ) for a, b in zip(lowerCAmelCase , lowerCAmelCase_ ) ) )
def _validate_point( lowerCAmelCase : list[float] ):
    '''simple docstring'''
    if lowerCAmelCase:
        if isinstance(lowerCAmelCase , list ):
            for item in lowerCAmelCase:
                if not isinstance(item , (int, float) ):
                    UpperCAmelCase__ : str = (
                        "Expected a list of numbers as input, found "
                        F"{type(item ).__name__}"
                    )
                    raise TypeError(UpperCAmelCase__ )
        else:
            UpperCAmelCase__ : str = F"Expected a list of numbers as input, found {type(lowerCAmelCase ).__name__}"
            raise TypeError(UpperCAmelCase__ )
    else:
        raise ValueError("Missing an input" )
def a__ ( lowerCAmelCase : list , lowerCAmelCase_ : list ):
    '''simple docstring'''
    _validate_point(lowerCAmelCase )
    _validate_point(lowerCAmelCase_ )
    if len(lowerCAmelCase ) != len(lowerCAmelCase_ ):
        raise ValueError("Both points must be in the same n-dimensional space" )
    return float(sum(abs(x - y ) for x, y in zip(lowerCAmelCase , lowerCAmelCase_ ) ) )
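# Worked examples: a__([1, 1] , [2, 2] ) == 2.0 and a__([1.5, -3.0] , [0.0, 0.0] ) == 4.5;
# both implementations above return the same value for the same inputs.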
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 | 0 |
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean( lowerCAmelCase : Dict , lowerCAmelCase_ : Any ):
    '''simple docstring'''
    return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(lowerCAmelCase , lowerCAmelCase_ ) ) )
def a__ ( dataset : np.ndarray , value_array : np.ndarray ):
    '''simple docstring'''
    if dataset.ndim != value_array.ndim:
        UpperCAmelCase__ : Tuple = (
            "Wrong input data's dimensions... "
            F"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(UpperCAmelCase__ )
    try:
        if dataset.shape[1] != value_array.shape[1]:
            UpperCAmelCase__ : Union[str, Any] = (
                "Wrong input data's shape... "
                F"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(UpperCAmelCase__ )
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape" )
    if dataset.dtype != value_array.dtype:
        UpperCAmelCase__ : Optional[Any] = (
            "Input data have different datatypes... "
            F"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(UpperCAmelCase__ )
    answer : list = []
    for value in value_array:
        dist = euclidean(value , dataset[0] )
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value , dataset_value )
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist] )
    return answer
def a__ ( lowerCAmelCase : Dict , lowerCAmelCase_ : List[Any] ):
    '''simple docstring'''
    return np.dot(lowerCAmelCase , lowerCAmelCase_ ) / (norm(lowerCAmelCase ) * norm(lowerCAmelCase_ ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 703 |
"""simple docstring"""
import math
def is_prime( number : int ):
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
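# Examples: is_prime(29 ) is True (29 = 6 * 5 - 1), is_prime(25 ) is False (5 * 5).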
def a__ ( lowerCAmelCase : int = 1_0001 ):
    '''simple docstring'''
    try:
        nth = int(lowerCAmelCase )
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int." ) from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one." )
    primes : list[int] = []
    num = 2
    while len(primes ) < nth:
        if is_prime(num ):
            primes.append(num )
            num += 1
        else:
            num += 1
    return primes[len(primes ) - 1]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 660 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
A__ : Optional[Any] = R"""
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
    title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
    doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
    dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
    dataset_split (`str`, *optional*, defaults to `"train"`):
        Which split of the `dataset` to load.
    index_name (`str`, *optional*, defaults to `"compressed"`):
        The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
        `"compressed"`.
    index_path (`str`, *optional*):
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
    use_dummy_dataset (`bool`, *optional*, defaults to `False`):
        Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
"""
@add_start_docstrings(_UpperCAmelCase )
class _lowercase ( _UpperCAmelCase ):
'''simple docstring'''
_A = """rag"""
_A = True
def __init__( self , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=" / " , __UpperCamelCase=" // " , __UpperCamelCase=5 , __UpperCamelCase=3_00 , __UpperCamelCase=7_68 , __UpperCamelCase=8 , __UpperCamelCase="wiki_dpr" , __UpperCamelCase="train" , __UpperCamelCase="compressed" , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=0.0 , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=None , **__UpperCamelCase , )-> Tuple:
super().__init__(
bos_token_id=lowercase__ , pad_token_id=lowercase__ , eos_token_id=lowercase__ , decoder_start_token_id=lowercase__ , forced_eos_token_id=lowercase__ , is_encoder_decoder=lowercase__ , prefix=lowercase__ , vocab_size=lowercase__ , **lowercase__ , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
UpperCAmelCase__ : List[Any] = kwargs.pop("question_encoder" )
UpperCAmelCase__ : Tuple = question_encoder_config.pop("model_type" )
UpperCAmelCase__ : List[str] = kwargs.pop("generator" )
UpperCAmelCase__ : List[str] = decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
UpperCAmelCase__ : List[str] = AutoConfig.for_model(lowercase__ , **lowercase__ )
UpperCAmelCase__ : Tuple = AutoConfig.for_model(lowercase__ , **lowercase__ )
UpperCAmelCase__ : int = reduce_loss
UpperCAmelCase__ : Optional[int] = label_smoothing
UpperCAmelCase__ : Dict = exclude_bos_score
UpperCAmelCase__ : Union[str, Any] = do_marginalize
UpperCAmelCase__ : Union[str, Any] = title_sep
UpperCAmelCase__ : int = doc_sep
UpperCAmelCase__ : int = n_docs
UpperCAmelCase__ : List[str] = max_combined_length
UpperCAmelCase__ : Tuple = dataset
UpperCAmelCase__ : int = dataset_split
UpperCAmelCase__ : str = index_name
UpperCAmelCase__ : List[str] = retrieval_vector_size
UpperCAmelCase__ : Dict = retrieval_batch_size
UpperCAmelCase__ : str = passages_path
UpperCAmelCase__ : Union[str, Any] = index_path
UpperCAmelCase__ : Tuple = use_dummy_dataset
UpperCAmelCase__ : Dict = output_retrieved
UpperCAmelCase__ : str = do_deduplication
UpperCAmelCase__ : Any = use_cache
if self.forced_eos_token_id is None:
UpperCAmelCase__ : Any = getattr(self.generator , "forced_eos_token_id" , lowercase__ )
@classmethod
def lowerCAmelCase__ ( cls , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase )-> List[str]:
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **lowercase__ )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase__ : Any = self.question_encoder.to_dict()
UpperCAmelCase__ : Dict = self.generator.to_dict()
UpperCAmelCase__ : Union[str, Any] = self.__class__.model_type
return output
| 704 |
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=30 , __UpperCamelCase=2 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=10 , __UpperCamelCase=0.02 , __UpperCamelCase=3 , __UpperCamelCase=0.6 , __UpperCamelCase=None , )-> List[Any]:
UpperCAmelCase__ : str = parent
UpperCAmelCase__ : Optional[Any] = batch_size
UpperCAmelCase__ : Any = image_size
UpperCAmelCase__ : Dict = patch_size
UpperCAmelCase__ : Optional[Any] = num_channels
UpperCAmelCase__ : Union[str, Any] = is_training
UpperCAmelCase__ : Any = use_labels
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : Dict = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : List[Any] = intermediate_size
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : List[Any] = hidden_dropout_prob
UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[Any] = type_sequence_label_size
UpperCAmelCase__ : Union[str, Any] = initializer_range
UpperCAmelCase__ : int = mask_ratio
UpperCAmelCase__ : Tuple = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCAmelCase__ : int = (image_size // patch_size) ** 2
UpperCAmelCase__ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
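        # e.g. with the defaults above: (30 // 2) ** 2 = 225 patches, so
        # seq_length = ceil((1 - 0.6) * 226) = ceil(90.4) = 91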
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Optional[Any] = None
if self.use_labels:
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self )-> int:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = ViTMAEModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Optional[int]:
UpperCAmelCase__ : List[Any] = ViTMAEForPreTraining(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : Optional[int] = model(__UpperCamelCase )
UpperCAmelCase__ : List[str] = (self.image_size // self.patch_size) ** 2
UpperCAmelCase__ : List[str] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCAmelCase__ : Dict = 1
UpperCAmelCase__ : str = ViTMAEForPreTraining(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
UpperCAmelCase__ : List[str] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Tuple = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = config_and_inputs
UpperCAmelCase__ : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_A = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
_A = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
_A = False
_A = False
_A = False
_A = False
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : Any = ViTMAEModelTester(self )
UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def lowerCAmelCase__ ( self )-> int:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def lowerCAmelCase__ ( self )-> Dict:
pass
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[str] = model_class(__UpperCamelCase )
UpperCAmelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Dict = [*signature.parameters.keys()]
UpperCAmelCase__ : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Dict:
# make masks reproducible
np.random.seed(2 )
UpperCAmelCase__ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
UpperCAmelCase__ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase__ : str = torch.from_numpy(__UpperCamelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCAmelCase__ : Optional[Any] = pt_noise
super().check_pt_tf_models(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[Any] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
        out_before = outputs[0].cpu().numpy()
        out_before[np.isnan(out_before )] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = model_class.from_pretrained(__UpperCamelCase )
model.to(__UpperCamelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : List[Any] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
# Make sure we don't have nans
            out_after = after_outputs[0].cpu().numpy()
            out_after[np.isnan(out_after )] = 0
            max_diff = np.amax(np.abs(out_before - out_after ) )
            self.assertLessEqual(max_diff , 1E-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> List[str]:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> Any:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> Optional[Any]:
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def lowerCAmelCase__ ( self )-> List[Any]:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
pass
@slow
def lowerCAmelCase__ ( self )-> Union[str, Any]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Tuple = ViTMAEModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase__ ( self )-> List[Any]:
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self )-> Optional[int]:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
UpperCAmelCase__ : Any = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(__UpperCamelCase )
UpperCAmelCase__ : Tuple = self.default_image_processor
UpperCAmelCase__ : List[Any] = prepare_img()
UpperCAmelCase__ : Optional[Any] = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCAmelCase__ : List[Any] = ViTMAEConfig()
UpperCAmelCase__ : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCAmelCase__ : Optional[int] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : List[str] = model(**__UpperCamelCase , noise=torch.from_numpy(__UpperCamelCase ).to(device=__UpperCamelCase ) )
# verify the logits
UpperCAmelCase__ : str = torch.Size((1, 1_96, 7_68) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(__UpperCamelCase ) , atol=1E-4 ) )
| 660 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class _lowercase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_A = 42
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCamelCase=3 , __UpperCamelCase=3 , __UpperCamelCase=("DownEncoderBlock2D",) , __UpperCamelCase=(64,) , __UpperCamelCase=2 , __UpperCamelCase=32 , __UpperCamelCase="silu" , __UpperCamelCase=True , )-> Dict:
super().__init__()
UpperCAmelCase__ : Optional[Any] = layers_per_block
UpperCAmelCase__ : List[str] = torch.nn.Convad(
__snake_case , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : str = nn.ModuleList([] )
# down
UpperCAmelCase__ : Tuple = block_out_channels[0]
for i, down_block_type in enumerate(__snake_case ):
UpperCAmelCase__ : Union[str, Any] = output_channel
UpperCAmelCase__ : Tuple = block_out_channels[i]
UpperCAmelCase__ : Dict = i == len(__snake_case ) - 1
UpperCAmelCase__ : int = get_down_block(
__snake_case , num_layers=self.layers_per_block , in_channels=__snake_case , out_channels=__snake_case , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=__snake_case , resnet_groups=__snake_case , attention_head_dim=__snake_case , temb_channels=__snake_case , )
self.down_blocks.append(__snake_case )
# mid
UpperCAmelCase__ : List[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=__snake_case , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=__snake_case , temb_channels=__snake_case , )
# out
UpperCAmelCase__ : Optional[int] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__snake_case , eps=1E-6 )
UpperCAmelCase__ : Union[str, Any] = nn.SiLU()
UpperCAmelCase__ : Any = 2 * out_channels if double_z else out_channels
UpperCAmelCase__ : Optional[int] = nn.Convad(block_out_channels[-1] , __snake_case , 3 , padding=1 )
UpperCAmelCase__ : str = False
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Optional[Any]:
UpperCAmelCase__ : Optional[int] = x
UpperCAmelCase__ : Optional[Any] = self.conv_in(__snake_case )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__UpperCamelCase ):
def custom_forward(*__UpperCamelCase ):
return module(*__snake_case )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
UpperCAmelCase__ : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__snake_case ) , __snake_case , use_reentrant=__snake_case )
# middle
UpperCAmelCase__ : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __snake_case , use_reentrant=__snake_case )
else:
for down_block in self.down_blocks:
UpperCAmelCase__ : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(__snake_case ) , __snake_case )
# middle
UpperCAmelCase__ : Tuple = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __snake_case )
else:
# down
for down_block in self.down_blocks:
UpperCAmelCase__ : Optional[Any] = down_block(__snake_case )
# middle
UpperCAmelCase__ : List[str] = self.mid_block(__snake_case )
# post-process
UpperCAmelCase__ : Tuple = self.conv_norm_out(__snake_case )
UpperCAmelCase__ : List[str] = self.conv_act(__snake_case )
UpperCAmelCase__ : Tuple = self.conv_out(__snake_case )
return sample
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCamelCase=3 , __UpperCamelCase=3 , __UpperCamelCase=("UpDecoderBlock2D",) , __UpperCamelCase=(64,) , __UpperCamelCase=2 , __UpperCamelCase=32 , __UpperCamelCase="silu" , __UpperCamelCase="group" , )-> Tuple:
super().__init__()
UpperCAmelCase__ : str = layers_per_block
UpperCAmelCase__ : Tuple = nn.Convad(
__snake_case , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
UpperCAmelCase__ : Union[str, Any] = None
UpperCAmelCase__ : Tuple = nn.ModuleList([] )
        UpperCAmelCase__ : str = in_channels if norm_type == "spatial" else None
# mid
UpperCAmelCase__ : str = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=__snake_case , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__snake_case , temb_channels=__snake_case , )
# up
UpperCAmelCase__ : Union[str, Any] = list(reversed(__snake_case ) )
UpperCAmelCase__ : Optional[Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__snake_case ):
UpperCAmelCase__ : Optional[Any] = output_channel
UpperCAmelCase__ : str = reversed_block_out_channels[i]
UpperCAmelCase__ : int = i == len(__snake_case ) - 1
UpperCAmelCase__ : List[Any] = get_up_block(
__snake_case , num_layers=self.layers_per_block + 1 , in_channels=__snake_case , out_channels=__snake_case , prev_output_channel=__snake_case , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=__snake_case , resnet_groups=__snake_case , attention_head_dim=__snake_case , temb_channels=__snake_case , resnet_time_scale_shift=__snake_case , )
self.up_blocks.append(__snake_case )
UpperCAmelCase__ : Optional[Any] = output_channel
# out
if norm_type == "spatial":
UpperCAmelCase__ : Union[str, Any] = SpatialNorm(block_out_channels[0] , __snake_case )
else:
UpperCAmelCase__ : str = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__snake_case , eps=1E-6 )
UpperCAmelCase__ : Any = nn.SiLU()
UpperCAmelCase__ : Optional[int] = nn.Convad(block_out_channels[0] , __snake_case , 3 , padding=1 )
UpperCAmelCase__ : str = False
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=None )-> List[str]:
UpperCAmelCase__ : str = z
UpperCAmelCase__ : List[Any] = self.conv_in(__snake_case )
UpperCAmelCase__ : int = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__UpperCamelCase ):
def custom_forward(*__UpperCamelCase ):
return module(*__snake_case )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
UpperCAmelCase__ : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __snake_case , __snake_case , use_reentrant=__snake_case )
UpperCAmelCase__ : Optional[int] = sample.to(__snake_case )
# up
for up_block in self.up_blocks:
UpperCAmelCase__ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__snake_case ) , __snake_case , __snake_case , use_reentrant=__snake_case )
else:
# middle
UpperCAmelCase__ : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __snake_case , __snake_case )
UpperCAmelCase__ : Tuple = sample.to(__snake_case )
# up
for up_block in self.up_blocks:
UpperCAmelCase__ : str = torch.utils.checkpoint.checkpoint(create_custom_forward(__snake_case ) , __snake_case , __snake_case )
else:
# middle
UpperCAmelCase__ : str = self.mid_block(__snake_case , __snake_case )
UpperCAmelCase__ : Optional[int] = sample.to(__snake_case )
# up
for up_block in self.up_blocks:
UpperCAmelCase__ : Optional[Any] = up_block(__snake_case , __snake_case )
# post-process
if latent_embeds is None:
UpperCAmelCase__ : Optional[int] = self.conv_norm_out(__snake_case )
else:
UpperCAmelCase__ : List[Any] = self.conv_norm_out(__snake_case , __snake_case )
UpperCAmelCase__ : Tuple = self.conv_act(__snake_case )
UpperCAmelCase__ : Union[str, Any] = self.conv_out(__snake_case )
return sample
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase="random" , __UpperCamelCase=False , __UpperCamelCase=True )-> Any:
super().__init__()
UpperCAmelCase__ : List[str] = n_e
UpperCAmelCase__ : Dict = vq_embed_dim
UpperCAmelCase__ : int = beta
UpperCAmelCase__ : Optional[int] = legacy
UpperCAmelCase__ : Tuple = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
UpperCAmelCase__ : Dict = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
UpperCAmelCase__ : Tuple = self.used.shape[0]
UpperCAmelCase__ : Tuple = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
UpperCAmelCase__ : List[Any] = self.re_embed
UpperCAmelCase__ : str = self.re_embed + 1
print(
F"Remapping {self.n_e} indices to {self.re_embed} indices. "
F"Using {self.unknown_index} for unknown indices." )
else:
UpperCAmelCase__ : Dict = n_e
UpperCAmelCase__ : Optional[Any] = sane_index_shape
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
UpperCAmelCase__ : Dict = inds.shape
assert len(__snake_case ) > 1
UpperCAmelCase__ : List[str] = inds.reshape(ishape[0] , -1 )
UpperCAmelCase__ : List[Any] = self.used.to(__snake_case )
UpperCAmelCase__ : Any = (inds[:, :, None] == used[None, None, ...]).long()
UpperCAmelCase__ : int = match.argmax(-1 )
UpperCAmelCase__ : Any = match.sum(2 ) < 1
if self.unknown_index == "random":
UpperCAmelCase__ : Optional[int] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
UpperCAmelCase__ : List[Any] = self.unknown_index
return new.reshape(__snake_case )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Optional[int]:
UpperCAmelCase__ : Tuple = inds.shape
assert len(__snake_case ) > 1
UpperCAmelCase__ : Optional[Any] = inds.reshape(ishape[0] , -1 )
UpperCAmelCase__ : List[Any] = self.used.to(__snake_case )
if self.re_embed > self.used.shape[0]: # extra token
UpperCAmelCase__ : Union[str, Any] = 0 # simply set to zero
UpperCAmelCase__ : Union[str, Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __snake_case )
return back.reshape(__snake_case )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
# reshape z -> (batch, height, width, channel) and flatten
UpperCAmelCase__ : Any = z.permute(0 , 2 , 3 , 1 ).contiguous()
UpperCAmelCase__ : Optional[int] = z.view(-1 , self.vq_embed_dim )
        # distances from z to embeddings e_j: ||z - e||^2 = ||z||^2 + ||e||^2 - 2 <e, z>
UpperCAmelCase__ : int = torch.argmin(torch.cdist(__snake_case , self.embedding.weight ) , dim=1 )
UpperCAmelCase__ : List[str] = self.embedding(__snake_case ).view(z.shape )
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : Tuple = None
# compute loss for embedding
if not self.legacy:
UpperCAmelCase__ : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
UpperCAmelCase__ : Tuple = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
UpperCAmelCase__ : List[Any] = z + (z_q - z).detach()
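        # straight-through estimator: the forward pass uses z_q, while gradients
        # flow back to z as if the quantization step were the identity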
# reshape back to match original input shape
UpperCAmelCase__ : Optional[int] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
UpperCAmelCase__ : Dict = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
UpperCAmelCase__ : int = self.remap_to_used(__snake_case )
UpperCAmelCase__ : Union[str, Any] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
UpperCAmelCase__ : Tuple = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> List[str]:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
UpperCAmelCase__ : Optional[int] = indices.reshape(shape[0] , -1 ) # add batch axis
UpperCAmelCase__ : Tuple = self.unmap_to_all(__snake_case )
UpperCAmelCase__ : List[Any] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
UpperCAmelCase__ : Union[str, Any] = self.embedding(__snake_case )
if shape is not None:
UpperCAmelCase__ : Optional[Any] = z_q.view(__snake_case )
# reshape back to match original input shape
UpperCAmelCase__ : Optional[int] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class _lowercase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase=False )-> List[Any]:
UpperCAmelCase__ : Any = parameters
UpperCAmelCase__ : List[str] = torch.chunk(__snake_case , 2 , dim=1 )
UpperCAmelCase__ : List[Any] = torch.clamp(self.logvar , -30.0 , 20.0 )
UpperCAmelCase__ : str = deterministic
UpperCAmelCase__ : Dict = torch.exp(0.5 * self.logvar )
UpperCAmelCase__ : Optional[int] = torch.exp(self.logvar )
if self.deterministic:
UpperCAmelCase__ : Optional[int] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def lowerCAmelCase__ ( self , __UpperCamelCase = None )-> int:
# make sure sample is on the same device as the parameters and has same dtype
UpperCAmelCase__ : Tuple = randn_tensor(
self.mean.shape , generator=__snake_case , device=self.parameters.device , dtype=self.parameters.dtype )
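        # reparameterization trick: x = mean + std * eps with eps ~ N(0, I)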
UpperCAmelCase__ : str = self.mean + self.std * sample
return x
def lowerCAmelCase__ ( self , __UpperCamelCase=None )-> str:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
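    # Closed form used above: for diagonal Gaussians p = N(mu_1, s_1^2) and q = N(mu_2, s_2^2),
    # KL(p || q) = 0.5 * sum((mu_1 - mu_2)^2 / s_2^2 + s_1^2 / s_2^2 - 1 - log s_1^2 + log s_2^2),
    # which reduces to 0.5 * sum(mu^2 + s^2 - 1 - log s^2) against a standard normal.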
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=[1, 2, 3] )-> List[Any]:
if self.deterministic:
return torch.Tensor([0.0] )
UpperCAmelCase__ : List[str] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__snake_case )
def lowerCAmelCase__ ( self )-> str:
return self.mean
| 705 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _lowercase :
'''simple docstring'''
_A = 42
# setable values
_A = 42
_A = 42
_A = None
@classmethod
def lowerCAmelCase__ ( cls , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
return cls(common=__UpperCamelCase , init_noise_sigma=__UpperCamelCase , timesteps=__UpperCamelCase )
@dataclass
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
_A = [e.name for e in FlaxKarrasDiffusionSchedulers]
_A = 42
@property
def lowerCAmelCase__ ( self )-> Optional[int]:
return True
@register_to_config
def __init__( self , __UpperCamelCase = 10_00 , __UpperCamelCase = 0.0001 , __UpperCamelCase = 0.02 , __UpperCamelCase = "linear" , __UpperCamelCase = None , __UpperCamelCase = "fixed_small" , __UpperCamelCase = True , __UpperCamelCase = "epsilon" , __UpperCamelCase = jnp.floataa , )-> List[str]:
UpperCAmelCase__ : int = dtype
def lowerCAmelCase__ ( self , __UpperCamelCase = None )-> DDPMSchedulerState:
if common is None:
UpperCAmelCase__ : int = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
UpperCAmelCase__ : Tuple = jnp.array(1.0 , dtype=self.dtype )
UpperCAmelCase__ : Tuple = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__UpperCamelCase , init_noise_sigma=__UpperCamelCase , timesteps=__UpperCamelCase , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None )-> jnp.ndarray:
return sample
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = () )-> DDPMSchedulerState:
UpperCAmelCase__ : Dict = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
UpperCAmelCase__ : Optional[int] = (jnp.arange(0 , __UpperCamelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__UpperCamelCase , timesteps=__UpperCamelCase , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None )-> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCAmelCase__ : Any = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
UpperCAmelCase__ : Union[str, Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
UpperCAmelCase__ : Dict = jnp.clip(__UpperCamelCase , a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
UpperCAmelCase__ : Tuple = jnp.log(jnp.clip(__UpperCamelCase , a_min=1E-20 ) )
elif variance_type == "fixed_large":
UpperCAmelCase__ : Union[str, Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
UpperCAmelCase__ : Optional[int] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
UpperCAmelCase__ : List[str] = variance
UpperCAmelCase__ : Union[str, Any] = state.common.betas[t]
UpperCAmelCase__ : Optional[int] = (predicted_variance + 1) / 2
UpperCAmelCase__ : Any = frac * max_log + (1 - frac) * min_log
return variance
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = True , )-> Union[FlaxDDPMSchedulerOutput, Tuple]:
UpperCAmelCase__ : List[str] = timestep
if key is None:
UpperCAmelCase__ : int = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = jnp.split(__UpperCamelCase , sample.shape[1] , axis=1 )
else:
UpperCAmelCase__ : Optional[Any] = None
# 1. compute alphas, betas
UpperCAmelCase__ : Union[str, Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
UpperCAmelCase__ : Union[str, Any] = 1 - alpha_prod_t
UpperCAmelCase__ : Tuple = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCAmelCase__ : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCAmelCase__ : Any = model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCAmelCase__ : List[Any] = jnp.clip(__UpperCamelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : List[str] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
UpperCAmelCase__ : List[Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : Tuple = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
UpperCAmelCase__ : Any = jax.random.split(__UpperCamelCase , num=1 )
UpperCAmelCase__ : int = jax.random.normal(__UpperCamelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__UpperCamelCase , __UpperCamelCase , predicted_variance=__UpperCamelCase ) ** 0.5) * noise
UpperCAmelCase__ : Dict = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
UpperCAmelCase__ : Any = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__UpperCamelCase , state=__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> jnp.ndarray:
return add_noise_common(state.common , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> jnp.ndarray:
return get_velocity_common(state.common , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def __len__( self )-> Tuple:
return self.config.num_train_timesteps
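# A denoising-loop sketch (illustrative; in `diffusers` this scheduler is
# `FlaxDDPMScheduler`, and `denoise`, `sample` and `key` below are assumed to
# exist rather than being defined in this file):
#
#     scheduler = FlaxDDPMScheduler(num_train_timesteps=1000 )
#     state = scheduler.create_state()
#     state = scheduler.set_timesteps(state , num_inference_steps=50 )
#     for t in state.timesteps:
#         model_output = denoise(sample , t )  # user-supplied model call
#         sample , state = scheduler.step(state , model_output , t , sample , key=key , return_dict=False )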
| 660 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : List[Any] = logging.get_logger(__name__)
A__ : List[Any] = {
"""alibaba-damo/mgp-str-base""": """https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json""",
}
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = "mgp-str"
def __init__( self , __UpperCamelCase=[32, 1_28] , __UpperCamelCase=4 , __UpperCamelCase=3 , __UpperCamelCase=27 , __UpperCamelCase=38 , __UpperCamelCase=5_02_57 , __UpperCamelCase=3_05_22 , __UpperCamelCase=7_68 , __UpperCamelCase=12 , __UpperCamelCase=12 , __UpperCamelCase=4.0 , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=1E-5 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=False , __UpperCamelCase=0.02 , **__UpperCamelCase , )-> Any:
super().__init__(**__UpperCamelCase )
UpperCAmelCase__ : List[str] = image_size
UpperCAmelCase__ : List[Any] = patch_size
UpperCAmelCase__ : Any = num_channels
UpperCAmelCase__ : Union[str, Any] = max_token_length
UpperCAmelCase__ : Any = num_character_labels
UpperCAmelCase__ : Optional[int] = num_bpe_labels
UpperCAmelCase__ : str = num_wordpiece_labels
UpperCAmelCase__ : Union[str, Any] = hidden_size
UpperCAmelCase__ : int = num_hidden_layers
UpperCAmelCase__ : Tuple = num_attention_heads
UpperCAmelCase__ : List[Any] = mlp_ratio
UpperCAmelCase__ : List[str] = distilled
UpperCAmelCase__ : Tuple = layer_norm_eps
UpperCAmelCase__ : Union[str, Any] = drop_rate
UpperCAmelCase__ : Union[str, Any] = qkv_bias
UpperCAmelCase__ : str = attn_drop_rate
UpperCAmelCase__ : str = drop_path_rate
UpperCAmelCase__ : Dict = output_aa_attentions
UpperCAmelCase__ : Optional[int] = initializer_range
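# A construction sketch (illustrative; in `transformers` this config class is
# `MgpstrConfig`, matching the `mgp-str` model type above):
#
#     config = MgpstrConfig(image_size=[32, 128] , max_token_length=27 )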
| 706 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ''
_A = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> str:
super().__init__(self , **__UpperCamelCase )
UpperCAmelCase__ : int = repo_info
UpperCAmelCase__ : Optional[int] = token
UpperCAmelCase__ : Optional[Any] = None
def lowerCAmelCase__ ( self )-> Optional[Any]:
if self.dir_cache is None:
UpperCAmelCase__ : str = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
UpperCAmelCase__ : str = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(__UpperCamelCase ): {"name": str(__UpperCamelCase ), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = "rb" , **__UpperCamelCase , )-> List[Any]:
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
UpperCAmelCase__ : Union[str, Any] = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
def lowerCAmelCase__ ( self , __UpperCamelCase , **__UpperCamelCase )-> List[str]:
self._get_dirs()
UpperCAmelCase__ : Union[str, Any] = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=False , **__UpperCamelCase )-> str:
self._get_dirs()
UpperCAmelCase__ : str = PurePosixPath(path.strip("/" ) )
UpperCAmelCase__ : Optional[Any] = {}
for p, f in self.dir_cache.items():
UpperCAmelCase__ : Optional[int] = PurePosixPath(p.strip("/" ) )
UpperCAmelCase__ : Dict = p.parent
if root == path:
UpperCAmelCase__ : Tuple = f
UpperCAmelCase__ : List[Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out )
| 660 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCamelCase )
class _lowercase ( _UpperCamelCase ):
'''simple docstring'''
_A = field(default='text-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
_A = Features({'text': Value('string' )} )
_A = Features({'labels': ClassLabel} )
_A = "text"
_A = "labels"
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Optional[int]:
if self.label_column not in features:
raise ValueError(F"Column {self.label_column} is not present in features." )
if not isinstance(features[self.label_column] , __a ):
raise ValueError(F"Column {self.label_column} is not a ClassLabel." )
UpperCAmelCase__ : Tuple = copy.deepcopy(self )
UpperCAmelCase__ : Optional[Any] = self.label_schema.copy()
UpperCAmelCase__ : Optional[Any] = features[self.label_column]
UpperCAmelCase__ : Union[str, Any] = label_schema
return task_template
@property
def lowerCAmelCase__ ( self )-> Dict[str, str]:
return {
self.text_column: "text",
self.label_column: "labels",
}
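# An alignment sketch (illustrative; upstream `datasets` names this template
# `TextClassification` and the method above `align_with_features`):
#
#     features = Features({"text": Value("string" ), "labels": ClassLabel(names=["neg", "pos"] )} )
#     aligned = TextClassification(text_column="text" , label_column="labels" ).align_with_features(features )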
| 707 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
A__ : Dict = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
if isinstance(lowerCAmelCase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(lowerCAmelCase , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(lowerCAmelCase ):
return [[videos]]
raise ValueError(F"Could not make batched video from {videos}" )
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ['pixel_values']
def __init__( self , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = 1 / 2_55 , __UpperCamelCase = True , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> None:
super().__init__(**__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = size if size is not None else {"shortest_edge": 2_56}
UpperCAmelCase__ : List[Any] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
UpperCAmelCase__ : List[str] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
UpperCAmelCase__ : int = get_size_dict(__UpperCamelCase , param_name="crop_size" )
UpperCAmelCase__ : Dict = do_resize
UpperCAmelCase__ : Optional[int] = size
UpperCAmelCase__ : List[Any] = do_center_crop
UpperCAmelCase__ : str = crop_size
UpperCAmelCase__ : Optional[int] = resample
UpperCAmelCase__ : int = do_rescale
UpperCAmelCase__ : Union[str, Any] = rescale_factor
UpperCAmelCase__ : Union[str, Any] = offset
UpperCAmelCase__ : Dict = do_normalize
UpperCAmelCase__ : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase__ : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
UpperCAmelCase__ : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
if "shortest_edge" in size:
UpperCAmelCase__ : Union[str, Any] = get_resize_output_image_size(__UpperCamelCase , size["shortest_edge"] , default_to_square=__UpperCamelCase )
elif "height" in size and "width" in size:
UpperCAmelCase__ : Any = (size["height"], size["width"])
else:
raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
UpperCAmelCase__ : Optional[Any] = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(__UpperCamelCase , size=(size["height"], size["width"]) , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = True , __UpperCamelCase = None , **__UpperCamelCase , )-> Tuple:
UpperCAmelCase__ : str = image.astype(np.floataa )
if offset:
UpperCAmelCase__ : Tuple = image - (scale / 2)
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
    def _preprocess_image( self , image , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , offset = None , do_normalize = None , image_mean = None , image_std = None , data_format = ChannelDimension.FIRST , )-> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True." )
        # All transformations expect numpy arrays.
        image = to_numpy_array(image )
        if do_resize:
            image = self.resize(image=image , size=size , resample=resample )
        if do_center_crop:
            image = self.center_crop(image , size=crop_size )
        if do_rescale:
            image = self.rescale(image=image , scale=rescale_factor , offset=offset )
        if do_normalize:
            image = self.normalize(image=image , mean=image_mean , std=image_std )
        image = to_channel_dimension_format(image , data_format )
        return image
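    # `preprocess` fans the per-frame `_preprocess_image` pipeline out over every
    # frame of every video in the batch and wraps the result in a BatchFeature.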
    def preprocess( self , videos , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , offset = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , )-> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        if not valid_images(videos ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        videos = make_batched(videos )
        videos = [
            [
                self._preprocess_image(
                    image=img , do_resize=do_resize , size=size , resample=resample , do_center_crop=do_center_crop , crop_size=crop_size , do_rescale=do_rescale , rescale_factor=rescale_factor , offset=offset , do_normalize=do_normalize , image_mean=image_mean , image_std=image_std , data_format=data_format , )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 660 | 0 |
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
A__ : str = """\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
A__ : Optional[int] = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
A__ : Dict = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
    def _info( self )-> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
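    # Note: corpus_gleu aggregates matching n-gram counts over the whole corpus
    # (micro-average) instead of averaging per-sentence GLEU scores.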
    def _compute( self , predictions , references , min_len = 1 , max_len = 4 , )-> Union[str, Any]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
}
| 708 |
"""simple docstring"""
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
    if not isinstance(lowerCAmelCase , int ):
        raise TypeError("Input value must be a 'int' type" )
    if lowerCAmelCase < 0:
        raise ValueError("Input value must be a positive integer" )
return bin(lowerCAmelCase ).count("1" )
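# For example, a__(25) == 3, since bin(25) == "0b11001" contains three set bits.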
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    '''simple docstring'''
    data: int
    next_node: Node | None
class SortedLinkedList:
    '''simple docstring'''
    def __init__( self , ints )-> None:
        self.head = None
        for i in sorted(ints , reverse=True ):
            self.head = Node(i , self.head )
    def __iter__( self )-> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node
    def __len__( self )-> int:
        return sum(1 for _ in self )
    def __str__( self )-> str:
        return " -> ".join([str(node ) for node in self] )
def merge_lists( sll_one : SortedLinkedList , sll_two : SortedLinkedList )-> SortedLinkedList:
    '''simple docstring'''
    return SortedLinkedList(list(sll_one ) + list(sll_two ) )
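# Design note: merge_lists concatenates and re-sorts, which is
# O((n + m) log(n + m)); a pointer-walk merge of the two already-sorted lists
# would run in O(n + m).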
if __name__ == "__main__":
import doctest
doctest.testmod()
    SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 709 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
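# Decorator factory: wraps a benchmark callable so it runs either eagerly or as a
# (optionally XLA-compiled) tf.function, depending on the benchmark arguments.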
def run_with_tf_optimizations( do_eager_mode : bool , use_xla : bool ):
    '''simple docstring'''
    def run_func(func ):
        @wraps(func )
        def run_in_eager_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        @wraps(func )
        @tf.function(experimental_compile=use_xla )
        def run_in_graph_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
            return run_in_eager_mode
        else:
            return run_in_graph_mode
    return run_func
def random_input_ids( batch_size : int , sequence_length : int , vocab_size : int ):
    '''simple docstring'''
    rng = random.Random()
    values = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(values , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class _lowercase ( Benchmark ):
'''simple docstring'''
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"
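    # Benchmarks TensorFlow models for inference/training speed and peak memory,
    # in eager or graph mode, on CPU, GPU or TPU.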
@property
    def framework_version( self )-> Optional[int]:
        return tf.__version__
    def _inference_speed( self , model_name , batch_size , sequence_length )-> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow." )
        _inference = self._prepare_inference_func(model_name , batch_size , sequence_length )
        return self._measure_speed(_inference )
    def _train_speed( self , model_name , batch_size , sequence_length )-> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow." )
        _train = self._prepare_train_func(model_name , batch_size , sequence_length )
        return self._measure_speed(_train )
    def _inference_memory( self , model_name , batch_size , sequence_length )-> [Memory, Optional[MemorySummary]]:
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , True )
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow." )
        _inference = self._prepare_inference_func(model_name , batch_size , sequence_length )
        return self._measure_memory(_inference )
    def _train_memory( self , model_name , batch_size , sequence_length )-> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , True )
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow." )
        _train = self._prepare_train_func(model_name , batch_size , sequence_length )
        return self._measure_memory(_train )
    def _prepare_inference_func( self , model_name , batch_size , sequence_length )-> Callable[[], None]:
        config = self.config_dict[model_name]
        if self.args.fpaa:
            raise NotImplementedError("Mixed precision is currently not supported." )
        has_model_class_in_config = (
            hasattr(config , "architectures" )
            and isinstance(config.architectures , list )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers" , fromlist=[model_class] )
                model_cls = getattr(transformers_module , model_class )
                model = model_cls(config )
            except ImportError:
                raise ImportError(
                    F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config )
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config , "vocab_size" ) else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size , sequence_length , vocab_size )
        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_decoder_forward():
            return model(input_ids , decoder_input_ids=input_ids , training=False )
        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_forward():
            return model(input_ids , training=False )
        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func( self , model_name , batch_size , sequence_length )-> Callable[[], None]:
        config = self.config_dict[model_name]
        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
        if self.args.fpaa:
            raise NotImplementedError("Mixed precision is currently not supported." )
        has_model_class_in_config = (
            hasattr(config , "architectures" )
            and isinstance(config.architectures , list )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers" , fromlist=[model_class] )
                model_cls = getattr(transformers_module , model_class )
                model = model_cls(config )
            except ImportError:
                raise ImportError(
                    F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config )
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config , "vocab_size" ) else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size , sequence_length , vocab_size )
        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_decoder_train():
            loss = model(input_ids , decoder_input_ids=input_ids , labels=input_ids , training=True )[0]
            gradients = tf.gradients(loss , model.trainable_variables )
            return gradients
        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_train():
            loss = model(input_ids , labels=input_ids , training=True )[0]
            gradients = tf.gradients(loss , model.trainable_variables )
            return gradients
        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed( self , func )-> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
                    timeit.repeat(func , repeat=1 , number=5 )
                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func , repeat=self.args.repeat , number=10 , )
                return min(runtimes ) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(F"Doesn't fit on GPU. {e}" )
    def _measure_memory( self , func )-> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used." )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line." )
                    trace = start_memory_tracing("transformers" )
                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`" )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU." )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU." )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle )
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use )
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow." )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func )
                        memory = Memory(memory_bytes ) if isinstance(memory_bytes , int ) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace )
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None
                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(F"Doesn't fit on GPU. {e}" )
                return "N/A", None
| 660 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowercase ( DiffusionPipeline ):
'''simple docstring'''
    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler
    def __init__( self , unet , scheduler )-> Any:
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , batch_size = 1 , num_inference_steps = 20_00 , generator = None , output_type = "pil" , return_dict = True , **kwargs , )-> Union[ImagePipelineOutput, Tuple]:
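        # Predictor-corrector sampling: at each timestep run `correct_steps`
        # Langevin corrector updates, then a single reverse-SDE predictor step.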
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape , generator=generator ) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        self.scheduler.set_sigmas(num_inference_steps )
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                model_output = self.unet(sample , sigma_t ).sample
                sample = self.scheduler.step_correct(model_output , sample , generator=generator ).prev_sample
            # prediction step
            model_output = model(sample , sigma_t ).sample
            output = self.scheduler.step_pred(model_output , t , sample , generator=generator )
            sample , sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0 , 1 )
        sample = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample )
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample )
| 710 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 660 | 0 |
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
A__ : Optional[int] = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class _lowercase ( PretrainedConfig ):
'''simple docstring'''
_A = 'esm'
    def __init__( self , vocab_size=None , mask_token_id=None , pad_token_id=None , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=10_26 , initializer_range=0.02 , layer_norm_eps=1E-12 , position_embedding_type="absolute" , use_cache=True , emb_layer_norm_before=None , token_dropout=False , is_folding_model=False , esmfold_config=None , vocab_list=None , **kwargs , )-> int:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , mask_token_id=mask_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values." )
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config , dict ):
                esmfold_config = EsmFoldConfig(**esmfold_config )
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , False ):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
    def to_dict( self )-> str:
        '''simple docstring'''
        output = super().to_dict()
        if isinstance(self.esmfold_config , EsmFoldConfig ):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
return output
@dataclass
class EsmFoldConfig:
'''simple docstring'''
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None
    def __post_init__( self )-> List[Any]:
        '''simple docstring'''
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk , dict ):
            self.trunk = TrunkConfig(**self.trunk )
    def to_dict( self )-> int:
        '''simple docstring'''
        output = asdict(self )
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
'''simple docstring'''
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None
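    # __post_init__ below checks that each state dim tiles exactly into
    # attention heads of the configured head width.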
    def __post_init__( self )-> int:
        '''simple docstring'''
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module , dict ):
            self.structure_module = StructureModuleConfig(**self.structure_module )
        if self.max_recycles <= 0:
            raise ValueError(F"`max_recycles` should be positive, got {self.max_recycles}." )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                F" {self.sequence_state_dim} and {self.sequence_head_width}." )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                F" {self.pairwise_state_dim} and {self.pairwise_head_width}." )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                F" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                F" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(F"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
        if self.dropout >= 0.4:
            raise ValueError(F"`dropout` should not be greater than 0.4, got {self.dropout}." )
    def to_dict( self )-> Optional[int]:
        '''simple docstring'''
        output = asdict(self )
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
'''simple docstring'''
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5
    def to_dict( self )-> List[str]:
'''simple docstring'''
return asdict(self )
def get_default_vocab_list():
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 711 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _lowercase ( ModelMixin , ConfigMixin ):
'''simple docstring'''
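    # Holds a per-feature mean/std used to whiten (`scale`) and un-whiten
    # (`unscale`) embedding vectors.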
@register_to_config
    def __init__( self , embedding_dim = 7_68 , )-> Union[str, Any]:
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1 , embedding_dim ) )
        self.std = nn.Parameter(torch.ones(1 , embedding_dim ) )
    def to( self , torch_device = None , torch_dtype = None , )-> Any:
        self.mean = nn.Parameter(self.mean.to(torch_device ).to(torch_dtype ) )
        self.std = nn.Parameter(self.std.to(torch_device ).to(torch_dtype ) )
        return self
    def scale( self , embeds )-> Union[str, Any]:
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds
    def unscale( self , embeds )-> List[Any]:
        embeds = (embeds * self.std) + self.mean
        return embeds
| 660 | 0 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders( accelerator : Accelerator , batch_size : int = 16 ):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased" )
    datasets = load_dataset("glue" , "mrpc" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="longest" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="pt" , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function( config , args ):
    '''simple docstring'''
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS" , None ) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"] )
    seed = int(config["seed"] )
    batch_size = int(config["batch_size"] )
    metric = evaluate.load("glue" , "mrpc" )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
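    # e.g. config["batch_size"]=64 becomes 4 micro-batches of MAX_GPU_BATCH_SIZE=16,
    # with gradients accumulated over 4 steps before each optimizer update.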
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions , references = accelerator.gather((predictions, batch["labels"]) )
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader ) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F"epoch {epoch}:" , eval_metric )
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="Simple example of training script." )
    parser.add_argument(
        "--mixed_precision" , type=str , default=None , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU." , )
    parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
    args = parser.parse_args()
    config = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 712 |
"""simple docstring"""
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch( gpta_checkpoint_path : str , gpta_config_file : str , pytorch_dump_folder_path : str ):
    '''simple docstring'''
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file )
    model = GPTaModel(config )
    # Load weights from numpy
    load_tf_weights_in_gpta(model , config , gpta_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(F"Save PyTorch model to {pytorch_weights_dump_path}" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F"Save configuration file to {pytorch_config_dump_path}" )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
    args = parser.parse_args()
    convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 660 | 0 |
"""simple docstring"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def replace_keys( state_dict ):
    '''simple docstring'''
    model_state_dict = {}
    state_dict.pop("pixel_mean" , None )
    state_dict.pop("pixel_std" , None )
    output_hypernetworks_mlps_pattern = R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
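    # The three layers of each output hypernetwork MLP map onto the HF layout as
    # layers.0 -> proj_in, layers.1 -> layers.0, layers.2 -> proj_out.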
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key )
        if re.match(output_hypernetworks_mlps_pattern , key ):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern , key ).group(2 ) )
            if layer_nb == 0:
                key = key.replace("layers.0" , "proj_in" )
            elif layer_nb == 1:
                key = key.replace("layers.1" , "layers.0" )
            elif layer_nb == 2:
                key = key.replace("layers.2" , "proj_out" )
        model_state_dict[key] = value
    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    return model_state_dict
def convert_sam_checkpoint( model_name , pytorch_dump_folder , push_to_hub , model_hub_id="ybelkada/segment-anything" ):
    '''simple docstring'''
    checkpoint_path = hf_hub_download(model_hub_id , F"checkpoints/{model_name}.pth" )
    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
        config = SamConfig(
            vision_config=vision_config , )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
        config = SamConfig(
            vision_config=vision_config , )
    state_dict = torch.load(checkpoint_path , map_location="cpu" )
    state_dict = replace_keys(state_dict )
    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor )
    hf_model = SamModel(config )
    hf_model.load_state_dict(state_dict )
    hf_model = hf_model.to("cuda" )
    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url , stream=True ).raw ).convert("RGB" )
    input_points = [[[400, 650]]]
    input_labels = [[1]]
    inputs = processor(images=np.array(raw_image ) , return_tensors="pt" ).to("cuda" )
    with torch.no_grad():
        output = hf_model(**inputs )
    scores = output.iou_scores.squeeze()
    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579_8902_5115_9668
        inputs = processor(
            images=np.array(raw_image ) , input_points=input_points , input_labels=input_labels , return_tensors="pt" ).to("cuda" )
        with torch.no_grad():
            output = hf_model(**inputs )
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712_6030_9219_3604
        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image ) , input_boxes=input_boxes , return_tensors="pt" ).to("cuda" )
        with torch.no_grad():
            output = hf_model(**inputs )
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686_0156_0592_6514
        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image ) , input_points=input_points , input_labels=input_labels , return_tensors="pt" ).to("cuda" )
        with torch.no_grad():
            output = hf_model(**inputs )
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936_0477_9243_4692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"""--model_name""",
default="""sam_vit_h_4b8939""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
parser.add_argument(
"""--model_hub_id""",
default="""ybelkada/segment-anything""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 713 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["""small""", """medium""", """large"""]
OLD_KEY = """lm_head.decoder.weight"""
NEW_KEY = """lm_head.weight"""
def convert_dialogpt_checkpoint( checkpoint_path : str , pytorch_dump_folder_path : str ):
    '''simple docstring'''
    d = torch.load(checkpoint_path )
    d[NEW_KEY] = d.pop(OLD_KEY )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    torch.save(d , os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME ) )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
    args = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"""{MODEL}_ft.pkl""")
        pytorch_dump_folder_path = f"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 660 | 0 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
A__ : Optional[int] = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""tokenizer_file""": {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""gpt-neox-20b""": 2_048,
}
class _lowercase ( PreTrainedTokenizerFast ):
'''simple docstring'''
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , )-> List[Any]:
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def save_vocabulary( self , save_directory , filename_prefix = None )-> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
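    # Builds one flat id sequence for conversational pipelines: each turn is
    # encoded and terminated with EOS, then the result is left-truncated to fit.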
    def _build_conversation_input_ids( self , conversation )-> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 714 |
"""simple docstring"""
from math import isqrt
def calculate_prime_numbers( max_number : int ):
    '''simple docstring'''
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , max_number , i ):
                is_prime[j] = False
    return [i for i in range(2 , max_number ) if is_prime[i]]
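# Two-pointer count: for each prime p (the `left` cursor) shrink `right` until
# p * prime_numbers[right] < max_number; every prime between the cursors then
# pairs with p to form a semiprime below max_number.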
def solution( max_number : int = 10**8 ):
    '''simple docstring'''
    prime_numbers = calculate_prime_numbers(max_number // 2 )
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 660 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_pix2struct""": [
"""PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Pix2StructConfig""",
"""Pix2StructTextConfig""",
"""Pix2StructVisionConfig""",
],
"""processing_pix2struct""": ["""Pix2StructProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""image_processing_pix2struct"""] = ["""Pix2StructImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_pix2struct"""] = [
        """PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """Pix2StructPreTrainedModel""",
        """Pix2StructForConditionalGeneration""",
        """Pix2StructVisionModel""",
        """Pix2StructTextModel""",
    ]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    A__ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
    sys.modules[__name__] = A__
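# Editorial note: with `_LazyModule`, a public name such as `Pix2StructConfig`
# is imported only on first attribute access; `_import_structure` above is the
# single source of truth mapping each name to its submodule, which is why the
# TYPE_CHECKING imports must mirror those strings exactly.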
| 715 |
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def a__ ( lowerCAmelCase : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : List[Any] ):
'''simple docstring'''
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Optional[int] = np.full((len(lowerCAmelCase ), sequence_length, 2) , lowerCAmelCase )
else:
UpperCAmelCase__ : Optional[Any] = np.full((len(lowerCAmelCase ), sequence_length) , lowerCAmelCase )
for i, tensor in enumerate(lowerCAmelCase ):
if padding_side == "right":
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Dict = tensor[:sequence_length]
else:
UpperCAmelCase__ : Tuple = tensor[:sequence_length]
else:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Optional[Any] = tensor[:sequence_length]
else:
UpperCAmelCase__ : int = tensor[:sequence_length]
return out_tensor.tolist()
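# Editorial sketch: the obfuscated assignments above lose the slice targets;
# in the original helper each row of `out_tensor` is overwritten in place
# (right padding fills the head of the row, left padding the tail). Intended
# behavior, with invented values:
#
#   padding_tensor([[1, 2, 3], [4]], -100, "right", 4)
#   # -> [[1, 2, 3, -100], [4, -100, -100, -100]]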
def a__ ( lowerCAmelCase : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = ord(lowerCAmelCase )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
UpperCAmelCase__ : Optional[Any] = unicodedata.category(lowerCAmelCase )
if cat.startswith("P" ):
return True
return False
@dataclass
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
_A = True
_A = None
_A = None
_A = -100
_A = "pt"
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[str]:
import torch
UpperCAmelCase__ : Optional[Any] = "label" if "label" in features[0].keys() else "labels"
UpperCAmelCase__ : Dict = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
UpperCAmelCase__ : str = self.tokenizer.pad(
__UpperCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , )
if labels is None:
return batch
UpperCAmelCase__ : Optional[Any] = torch.tensor(batch["entity_ids"] ).shape[1]
UpperCAmelCase__ : int = self.tokenizer.padding_side
if padding_side == "right":
UpperCAmelCase__ : int = [
list(__UpperCamelCase ) + [self.label_pad_token_id] * (sequence_length - len(__UpperCamelCase )) for label in labels
]
else:
UpperCAmelCase__ : List[Any] = [
[self.label_pad_token_id] * (sequence_length - len(__UpperCamelCase )) + list(__UpperCamelCase ) for label in labels
]
UpperCAmelCase__ : Optional[Any] = [feature["ner_tags"] for feature in features]
UpperCAmelCase__ : int = padding_tensor(__UpperCamelCase , -1 , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = [feature["original_entity_spans"] for feature in features]
UpperCAmelCase__ : int = padding_tensor(__UpperCamelCase , (-1, -1) , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Optional[int] = {k: torch.tensor(__UpperCamelCase , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 660 | 0 |
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 716 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def a__ ( lowerCAmelCase : List[str] ):
'''simple docstring'''
def wrapper(*lowerCAmelCase : Any , **lowerCAmelCase : Tuple ):
UpperCAmelCase__ : Optional[int] = timeit.default_timer()
UpperCAmelCase__ : int = func(*lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase__ : List[Any] = timeit.default_timer() - starttime
return delta
UpperCAmelCase__ : int = func.__name__
return wrapper
def a__ ( lowerCAmelCase : dict , lowerCAmelCase : Optional[int]=100 , lowerCAmelCase : List[str]=None ):
'''simple docstring'''
UpperCAmelCase__ : str = []
UpperCAmelCase__ : Optional[Any] = seq_shapes or {}
for i in range(lowerCAmelCase ):
UpperCAmelCase__ : int = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(lowerCAmelCase , _ArrayXD ):
UpperCAmelCase__ : List[str] = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(lowerCAmelCase , datasets.Value ):
if v.dtype == "string":
UpperCAmelCase__ : Dict = "The small grey turtle was surprisingly fast when challenged."
else:
UpperCAmelCase__ : str = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(lowerCAmelCase , datasets.Sequence ):
while isinstance(lowerCAmelCase , datasets.Sequence ):
UpperCAmelCase__ : List[str] = v.feature
UpperCAmelCase__ : Optional[int] = seq_shapes[k]
UpperCAmelCase__ : Optional[int] = np.random.rand(*lowerCAmelCase ).astype(v.dtype )
UpperCAmelCase__ : Union[str, Any] = data
dummy_data.append((i, example) )
return dummy_data
def a__ ( lowerCAmelCase : List[str] , lowerCAmelCase : Tuple , lowerCAmelCase : List[str]=100 , lowerCAmelCase : Optional[int]=None ):
'''simple docstring'''
UpperCAmelCase__ : int = generate_examples(lowerCAmelCase , num_examples=lowerCAmelCase , seq_shapes=lowerCAmelCase )
with ArrowWriter(features=lowerCAmelCase , path=lowerCAmelCase ) as writer:
for key, record in dummy_data:
UpperCAmelCase__ : List[Any] = features.encode_example(lowerCAmelCase )
writer.write(lowerCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
F"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
UpperCAmelCase__ : Optional[int] = datasets.Dataset.from_file(filename=lowerCAmelCase , info=datasets.DatasetInfo(features=lowerCAmelCase ) )
return dataset
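# Hypothetical benchmark call; `generate_example_dataset` is the presumed
# original name of the last helper above, and the positional order
# (dataset_path, features, num_examples, seq_shapes) is inferred from its body.
# The feature spec is invented for the sketch:
#
#   feats = datasets.Features({"text": datasets.Value("string")})
#   dset = generate_example_dataset("/tmp/bench.arrow", feats, num_examples=10)
#   assert len(dset) == 10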
| 660 | 0 |
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def a__ ( lowerCAmelCase : str ):
'''simple docstring'''
monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )
@pytest.fixture
def a__ ( lowerCAmelCase : Optional[int] ):
'''simple docstring'''
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase )-> Optional[int]:
UpperCAmelCase__ : List[str] = metric_id
class _lowercase :
'''simple docstring'''
        _A = [MetricMock(metric_id ) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']]
def lowerCAmelCase__ ( self )-> List[Any]:
return self._metrics
monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() )
@pytest.mark.parametrize(
"func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def a__ ( lowerCAmelCase : Tuple , lowerCAmelCase : List[str] , lowerCAmelCase : Dict , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Dict ):
'''simple docstring'''
if "tmp_path" in args:
UpperCAmelCase__ : Dict = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
with pytest.warns(lowerCAmelCase , match="https://huggingface.co/docs/evaluate" ):
func(*lowerCAmelCase )
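# Editorial note: the parametrized test above only asserts that each metric
# entry point (load_metric, list_metrics, inspect_metric) emits a deprecation
# warning pointing at https://huggingface.co/docs/evaluate; the monkeypatched
# hub mock keeps the check offline.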
| 717 |
"""simple docstring"""
from manim import *
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : str = Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase__ : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Union[str, Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : List[Any] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : int = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = VGroup(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("CPU" , font_size=24 )
UpperCAmelCase__ : Any = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(4 )]
UpperCAmelCase__ : List[str] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Union[str, Any] = Text("GPU" , font_size=24 )
UpperCAmelCase__ : Dict = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : List[str] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("Model" , font_size=24 )
UpperCAmelCase__ : Dict = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : List[str] = []
for i, rect in enumerate(__UpperCamelCase ):
rect.set_stroke(__UpperCamelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
UpperCAmelCase__ : int = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCamelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCamelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__UpperCamelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__UpperCamelCase , buff=0.0 )
self.add(__UpperCamelCase )
cpu_targs.append(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Any = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("Loaded Checkpoint" , font_size=24 )
UpperCAmelCase__ : Any = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , aligned_edge=__UpperCamelCase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
UpperCAmelCase__ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase__ : Any = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : str = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(__UpperCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
UpperCAmelCase__ : Optional[Any] = MarkupText(
F"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCamelCase ) , Write(__UpperCamelCase ) )
self.play(Write(__UpperCamelCase , run_time=1 ) , Create(__UpperCamelCase , run_time=1 ) )
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : List[str] = []
for i, rect in enumerate(__UpperCamelCase ):
UpperCAmelCase__ : Optional[Any] = fill.copy().set_fill(__UpperCamelCase , opacity=0.7 )
target.move_to(__UpperCamelCase )
first_animations.append(GrowFromCenter(__UpperCamelCase , run_time=1 ) )
UpperCAmelCase__ : List[str] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__UpperCamelCase , run_time=1.5 ) )
self.play(*__UpperCamelCase )
self.play(*__UpperCamelCase )
self.wait()
| 660 | 0 |
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=99 , __UpperCamelCase=16 , __UpperCamelCase=36 , __UpperCamelCase=6 , __UpperCamelCase=6 , __UpperCamelCase=6 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=5_12 , __UpperCamelCase=16 , __UpperCamelCase=2 , __UpperCamelCase=0.02 , __UpperCamelCase=3 , __UpperCamelCase=4 , __UpperCamelCase=None , )-> Dict:
UpperCAmelCase__ : int = parent
UpperCAmelCase__ : int = batch_size
UpperCAmelCase__ : Optional[Any] = seq_length
UpperCAmelCase__ : List[str] = is_training
UpperCAmelCase__ : Optional[Any] = use_input_mask
UpperCAmelCase__ : str = use_token_type_ids
UpperCAmelCase__ : Union[str, Any] = use_labels
UpperCAmelCase__ : List[Any] = vocab_size
UpperCAmelCase__ : Optional[Any] = embedding_size
UpperCAmelCase__ : int = hidden_size
UpperCAmelCase__ : Optional[Any] = num_hidden_layers
UpperCAmelCase__ : Any = num_hidden_groups
UpperCAmelCase__ : List[Any] = num_attention_heads
UpperCAmelCase__ : int = intermediate_size
UpperCAmelCase__ : Optional[int] = hidden_act
UpperCAmelCase__ : List[Any] = hidden_dropout_prob
UpperCAmelCase__ : List[Any] = attention_probs_dropout_prob
UpperCAmelCase__ : Union[str, Any] = max_position_embeddings
UpperCAmelCase__ : List[Any] = type_vocab_size
UpperCAmelCase__ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase__ : List[str] = initializer_range
UpperCAmelCase__ : Optional[Any] = num_labels
UpperCAmelCase__ : Union[str, Any] = num_choices
UpperCAmelCase__ : Optional[int] = scope
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : Optional[int] = None
if self.use_input_mask:
UpperCAmelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ : Union[str, Any] = None
if self.use_token_type_ids:
UpperCAmelCase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ : Union[str, Any] = None
UpperCAmelCase__ : Tuple = None
UpperCAmelCase__ : str = None
if self.use_labels:
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase__ ( self )-> int:
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Dict:
UpperCAmelCase__ : Optional[int] = AlbertModel(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase__ : Optional[int] = model(__A , attention_mask=__A , token_type_ids=__A )
UpperCAmelCase__ : Tuple = model(__A , token_type_ids=__A )
UpperCAmelCase__ : Dict = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> str:
UpperCAmelCase__ : Tuple = AlbertForPreTraining(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase__ : Optional[Any] = model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , sentence_order_label=__A , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : Tuple = AlbertForMaskedLM(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase__ : str = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Dict:
UpperCAmelCase__ : Tuple = AlbertForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase__ : Optional[Any] = model(
__A , attention_mask=__A , token_type_ids=__A , start_positions=__A , end_positions=__A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : Tuple = self.num_labels
UpperCAmelCase__ : Any = AlbertForSequenceClassification(__A )
model.to(__A )
model.eval()
UpperCAmelCase__ : int = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : List[Any] = self.num_labels
UpperCAmelCase__ : List[Any] = AlbertForTokenClassification(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase__ : Dict = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Tuple:
UpperCAmelCase__ : Tuple = self.num_choices
UpperCAmelCase__ : Dict = AlbertForMultipleChoice(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase__ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ : Union[str, Any] = model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : Tuple = self.prepare_config_and_inputs()
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = config_and_inputs
UpperCAmelCase__ : List[str] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_A = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
_A = (
{
'feature-extraction': AlbertModel,
'fill-mask': AlbertForMaskedLM,
'question-answering': AlbertForQuestionAnswering,
'text-classification': AlbertForSequenceClassification,
'token-classification': AlbertForTokenClassification,
'zero-shot': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
_A = True
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False )-> int:
UpperCAmelCase__ : Tuple = super()._prepare_for_class(__A , __A , return_labels=__A )
if return_labels:
if model_class in get_values(__A ):
UpperCAmelCase__ : Dict = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__A )
UpperCAmelCase__ : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
return inputs_dict
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : Tuple = AlbertModelTester(self )
UpperCAmelCase__ : Optional[int] = ConfigTester(self , config_class=__A , hidden_size=37 )
def lowerCAmelCase__ ( self )-> int:
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__A )
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__A )
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__A )
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__A )
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__A )
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase__ : Optional[int] = type
self.model_tester.create_and_check_model(*__A )
@slow
def lowerCAmelCase__ ( self )-> Union[str, Any]:
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : str = AlbertModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@require_torch
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ : int = AlbertModel.from_pretrained("albert-base-v2" )
UpperCAmelCase__ : str = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
UpperCAmelCase__ : str = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCAmelCase__ : Tuple = model(__A , attention_mask=__A )[0]
UpperCAmelCase__ : int = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , __A )
UpperCAmelCase__ : Optional[Any] = torch.tensor(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __A , atol=1E-4 ) )
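# Editorial note: the integration check above compares a 3x3 slice of the last
# hidden state against hard-coded reference values at atol=1e-4; slicing keeps
# the test stable across hardware while still catching weight or numerics
# regressions.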
| 718 |
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
A__ : Tuple = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : nn.ModuleList , lowerCAmelCase : nn.ModuleList , lowerCAmelCase : List[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(lowerCAmelCase ) == len(lowerCAmelCase ), F"{len(lowerCAmelCase )} != {len(lowerCAmelCase )}"
dest_layers.load_state_dict(layers_to_copy.state_dict() )
A__ : List[Any] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
A__ : List[Any] = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def a__ ( lowerCAmelCase : Dict , lowerCAmelCase : Dict ):
'''simple docstring'''
try:
UpperCAmelCase__ : Tuple = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
F" {n_student}" )
return list(range(lowerCAmelCase ) )
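# Worked example for the lookup above (values taken from LAYERS_TO_COPY): a
# 12-layer teacher distilled to a 3-layer student copies teacher layers
# [0, 6, 11], i.e. first, middle and last, so the student spans the teacher's
# full depth; unknown (teacher, student) pairs fall back to the first
# n_student layers with a warning.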
def a__ ( lowerCAmelCase : int , lowerCAmelCase : Tuple ):
'''simple docstring'''
if n_student > n_teacher:
raise ValueError(F"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}" )
elif n_teacher == n_student:
return list(range(lowerCAmelCase ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def a__ ( lowerCAmelCase : Union[str, PreTrainedModel] , lowerCAmelCase : Union[str, Path] = "student" , lowerCAmelCase : Union[int, None] = None , lowerCAmelCase : Union[int, None] = None , lowerCAmelCase : List[str]=False , lowerCAmelCase : List[str]=None , lowerCAmelCase : List[str]=None , **lowerCAmelCase : List[str] , ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
assert (e is not None) or (d is not None), _msg
if isinstance(lowerCAmelCase , lowerCAmelCase ):
AutoTokenizer.from_pretrained(lowerCAmelCase ).save_pretrained(lowerCAmelCase ) # purely for convenience
UpperCAmelCase__ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase ).eval()
else:
assert isinstance(lowerCAmelCase , lowerCAmelCase ), F"teacher must be a model or string got type {type(lowerCAmelCase )}"
UpperCAmelCase__ : int = teacher.config.to_diff_dict()
try:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
UpperCAmelCase__ : Tuple = teacher_e
if d is None:
UpperCAmelCase__ : str = teacher_d
init_kwargs.update({"encoder_layers": e, "decoder_layers": d} )
except AttributeError: # T5
if hasattr(teacher.config , "num_encoder_layers" ):
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
UpperCAmelCase__ : Optional[Any] = teacher_e
if d is None:
UpperCAmelCase__ : Optional[Any] = teacher_d
if hasattr(teacher.config , "num_encoder_layers" ):
init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d} )
else:
init_kwargs.update({"num_layers": e, "num_decoder_layers": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(lowerCAmelCase )
# Copy weights
UpperCAmelCase__ : Tuple = teacher.config_class(**lowerCAmelCase )
UpperCAmelCase__ : List[str] = AutoModelForSeqaSeqLM.from_config(lowerCAmelCase )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
UpperCAmelCase__ : Optional[int] = student.load_state_dict(teacher.state_dict() , strict=lowerCAmelCase )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
UpperCAmelCase__ , UpperCAmelCase__ : int = list(range(lowerCAmelCase ) ), list(range(lowerCAmelCase ) )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
F" {save_path}" )
student.save_pretrained(lowerCAmelCase )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
UpperCAmelCase__ : List[int] = pick_layers_to_copy(lowerCAmelCase , lowerCAmelCase )
if d_layers_to_copy is None:
UpperCAmelCase__ : List[int] = pick_layers_to_copy(lowerCAmelCase , lowerCAmelCase )
try:
if hasattr(
lowerCAmelCase , "prophetnet" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , lowerCAmelCase )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , lowerCAmelCase )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , lowerCAmelCase )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , lowerCAmelCase )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , lowerCAmelCase )
copy_layers(teacher.decoder.block , student.decoder.block , lowerCAmelCase )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}" )
UpperCAmelCase__ : int = {
"teacher_type": teacher.config.model_type,
"copied_encoder_layers": e_layers_to_copy,
"copied_decoder_layers": d_layers_to_copy,
}
student.save_pretrained(lowerCAmelCase )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
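    # Hypothetical CLI invocation (model id and paths are placeholders); fire
    # exposes the function's keyword arguments as flags:
    #   python make_student.py facebook/bart-large-cnn ./student_bart_6_3 --e 6 --d 3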
| 660 | 0 |
"""simple docstring"""
A__ : Optional[int] = """\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"""
A__ : Optional[int] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
A__ : Any = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 719 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowercase ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self )-> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : Tuple = ort.SessionOptions()
UpperCAmelCase__ : List[str] = False
return options
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase__ : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase__ : str = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : int = "A red cat sitting on a park bench"
UpperCAmelCase__ : Tuple = np.random.RandomState(0 )
UpperCAmelCase__ : Any = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type="np" , )
UpperCAmelCase__ : Tuple = output.images
UpperCAmelCase__ : Dict = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase__ : Union[str, Any] = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase__ : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" )
UpperCAmelCase__ : Optional[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : int = "A red cat sitting on a park bench"
UpperCAmelCase__ : List[str] = np.random.RandomState(0 )
UpperCAmelCase__ : str = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCamelCase , output_type="np" , )
UpperCAmelCase__ : List[str] = output.images
UpperCAmelCase__ : List[Any] = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase__ : int = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
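# Editorial note: both tests above run the ONNX inpainting pipeline through
# onnxruntime's CUDA provider and compare a 3x3 corner slice of the 512x512
# output against reference pixels at atol=1e-3, mirroring the slice-based
# checks used for the PyTorch pipelines.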
| 660 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
A__ : List[str] = logging.get_logger("""transformers.models.speecht5""")
A__ : List[str] = {
'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm',
'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection',
'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv',
'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed',
}
A__ : int = {
'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens',
'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha',
}
A__ : str = {
'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0',
'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1',
'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer',
'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha',
'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer',
}
A__ : Any = {
'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out',
'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out',
'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv',
'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm',
'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv',
'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm',
'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv',
'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm',
'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv',
'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm',
'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv',
'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm',
}
A__ : Any = {
'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens',
}
A__ : int = {
'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head',
}
A__ : Any = {
'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj',
'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj',
'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj',
'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj',
'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm',
'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense',
'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense',
'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm',
'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k',
}
A__ : Optional[Any] = {
'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj',
'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj',
'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj',
'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj',
'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm',
'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj',
'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj',
'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj',
'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj',
'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm',
'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense',
'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense',
'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm',
}
A__ : str = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
A__ : str = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
A__ : Union[str, Any] = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
A__ : int = []
A__ : Optional[Any] = [
'encoder.version',
'encoder.layers.*.norm_k.weight',
'encoder.layers.*.norm_k.bias',
'decoder.version',
'decoder.layers.*.norm_k.weight',
'decoder.layers.*.norm_k.bias',
'decoder.pos_emb.pe_k',
'speech_encoder_prenet.embed_positions._float_tensor',
'text_decoder_prenet.embed_positions._float_tensor',
]
A__ : Optional[Any] = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'speech_decoder_prenet.*',
'speech_decoder_postnet.*',
]
A__ : Union[str, Any] = IGNORE_KEYS + [
'encoder.proj',
'speech_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
A__ : Optional[int] = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
def a__ ( lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] ):
'''simple docstring'''
for attribute in key.split("." ):
UpperCAmelCase__ : Dict = getattr(lowercase_ , lowercase_ )
if weight_type is not None:
UpperCAmelCase__ : Tuple = getattr(lowercase_ , lowercase_ ).shape
else:
UpperCAmelCase__ : int = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}" )
if weight_type == "weight":
UpperCAmelCase__ : Any = value
elif weight_type == "weight_g":
UpperCAmelCase__ : Union[str, Any] = value
elif weight_type == "weight_v":
UpperCAmelCase__ : Optional[Any] = value
elif weight_type == "bias":
UpperCAmelCase__ : str = value
elif weight_type == "running_mean":
UpperCAmelCase__ : List[Any] = value
elif weight_type == "running_var":
UpperCAmelCase__ : str = value
elif weight_type == "num_batches_tracked":
UpperCAmelCase__ : List[Any] = value
else:
UpperCAmelCase__ : Optional[Any] = value
logger.info(F"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}." )
def a__ ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int] ):
'''simple docstring'''
for key in ignore_keys:
if key.endswith(".*" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
UpperCAmelCase__ : List[Any] = key.split(".*." )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
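# Worked example for the wildcard matching above (pattern taken from
# IGNORE_KEYS): "encoder.layers.*.norm_k.weight" splits on ".*." into
# ("encoder.layers", "norm_k.weight"), and any name containing both parts,
# e.g. "encoder.layers.3.norm_k.weight", is ignored regardless of the layer
# index; patterns ending in ".*" match by prefix instead.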
def a__ ( lowerCAmelCase : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = []
if task == "s2t":
UpperCAmelCase__ : Any = hf_model.speechta.encoder.prenet.feature_encoder
UpperCAmelCase__ : Optional[Any] = MAPPING_S2T
UpperCAmelCase__ : List[Any] = IGNORE_KEYS_S2T
elif task == "t2s":
UpperCAmelCase__ : Any = None
UpperCAmelCase__ : Optional[int] = MAPPING_T2S
UpperCAmelCase__ : Optional[int] = IGNORE_KEYS_T2S
elif task == "s2s":
UpperCAmelCase__ : Tuple = hf_model.speechta.encoder.prenet.feature_encoder
UpperCAmelCase__ : List[str] = MAPPING_S2S
UpperCAmelCase__ : Dict = IGNORE_KEYS_S2S
else:
raise ValueError(F"Unsupported task: {task}" )
for name, value in fairseq_dict.items():
if should_ignore(lowercase_ , lowercase_ ):
logger.info(F"{name} was ignored" )
continue
UpperCAmelCase__ : Optional[Any] = False
if "conv_layers" in name:
load_conv_layer(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , hf_model.config.feat_extract_norm == "group" , )
UpperCAmelCase__ : Any = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
UpperCAmelCase__ : Tuple = key.split(".*." )
if prefix in name and suffix in name:
UpperCAmelCase__ : Optional[Any] = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
UpperCAmelCase__ : Dict = True
if "*" in mapped_key:
UpperCAmelCase__ : int = name.split(lowercase_ )[0].split("." )[-2]
UpperCAmelCase__ : List[str] = mapped_key.replace("*" , lowercase_ )
if "weight_g" in name:
UpperCAmelCase__ : Optional[Any] = """weight_g"""
elif "weight_v" in name:
UpperCAmelCase__ : List[Any] = """weight_v"""
elif "bias" in name:
UpperCAmelCase__ : int = """bias"""
elif "weight" in name:
UpperCAmelCase__ : Any = """weight"""
elif "running_mean" in name:
UpperCAmelCase__ : Optional[Any] = """running_mean"""
elif "running_var" in name:
UpperCAmelCase__ : Tuple = """running_var"""
elif "num_batches_tracked" in name:
UpperCAmelCase__ : Dict = """num_batches_tracked"""
else:
UpperCAmelCase__ : Union[str, Any] = None
set_recursively(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
continue
if not is_used:
unused_weights.append(lowercase_ )
logger.warning(F"Unused weights: {unused_weights}" )
def a__ ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : str , lowerCAmelCase : Any ):
'''simple docstring'''
UpperCAmelCase__ : int = full_name.split("conv_layers." )[-1]
UpperCAmelCase__ : Optional[int] = name.split("." )
UpperCAmelCase__ : Any = int(items[0] )
UpperCAmelCase__ : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
UpperCAmelCase__ : Optional[int] = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
UpperCAmelCase__ : List[str] = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
UpperCAmelCase__ : int = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
UpperCAmelCase__ : str = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(lowercase_ )
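# Editorial note: in the fairseq feature extractor, `type_id == 0` addresses
# the conv weight/bias of layer `layer_id`, while `type_id == 2` addresses its
# norm parameters; the group-norm variant exists only on layer 0, hence the
# extra `layer_id == 0 and use_group_norm` condition above.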
@torch.no_grad()
def a__ ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Tuple=None , lowerCAmelCase : Dict=None , ):
'''simple docstring'''
if config_path is not None:
UpperCAmelCase__ : Tuple = SpeechTaConfig.from_pretrained(lowercase_ )
else:
UpperCAmelCase__ : Tuple = SpeechTaConfig()
if task == "s2t":
UpperCAmelCase__ : Optional[int] = config.max_text_positions
UpperCAmelCase__ : Any = SpeechTaForSpeechToText(lowercase_ )
elif task == "t2s":
UpperCAmelCase__ : List[Any] = 1876
UpperCAmelCase__ : str = 600
UpperCAmelCase__ : List[Any] = config.max_speech_positions
UpperCAmelCase__ : Tuple = SpeechTaForTextToSpeech(lowercase_ )
elif task == "s2s":
UpperCAmelCase__ : Dict = 1876
UpperCAmelCase__ : int = config.max_speech_positions
UpperCAmelCase__ : Any = SpeechTaForSpeechToSpeech(lowercase_ )
else:
raise ValueError(F"Unknown task name: {task}" )
if vocab_path:
UpperCAmelCase__ : Any = SpeechTaTokenizer(lowercase_ , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
UpperCAmelCase__ : Optional[Any] = AddedToken("<mask>" , lstrip=lowercase_ , rstrip=lowercase_ )
UpperCAmelCase__ : List[Any] = mask_token
tokenizer.add_special_tokens({"mask_token": mask_token} )
tokenizer.add_tokens(["<ctc_blank>"] )
UpperCAmelCase__ : Dict = SpeechTaFeatureExtractor()
UpperCAmelCase__ : List[Any] = SpeechTaProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ )
processor.save_pretrained(lowercase_ )
UpperCAmelCase__ : List[Any] = torch.load(lowercase_ )
recursively_load_weights(fairseq_checkpoint["model"] , lowercase_ , lowercase_ )
model.save_pretrained(lowercase_ )
if repo_id:
print("Pushing to the hub..." )
processor.push_to_hub(lowercase_ )
model.push_to_hub(lowercase_ )
if __name__ == "__main__":
A__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"""--task""",
default="""s2t""",
type=str,
        help="""Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--vocab_path""", default=None, type=str, help="""Path to SentencePiece model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
A__ : List[str] = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 720 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A__ : Union[str, Any] = logging.get_logger(__name__)
A__ : Optional[int] = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 'table-transformer'
_A = ['past_key_values']
_A = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=3 , __UpperCamelCase=1_00 , __UpperCamelCase=6 , __UpperCamelCase=20_48 , __UpperCamelCase=8 , __UpperCamelCase=6 , __UpperCamelCase=20_48 , __UpperCamelCase=8 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=True , __UpperCamelCase="relu" , __UpperCamelCase=2_56 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.02 , __UpperCamelCase=1.0 , __UpperCamelCase=False , __UpperCamelCase="sine" , __UpperCamelCase="resnet50" , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=0.1 , **__UpperCamelCase , )-> List[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCAmelCase__ : Any = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : int = backbone_config.get("model_type" )
UpperCAmelCase__ : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase__ : int = config_class.from_dict(__UpperCamelCase )
# set timm attributes to None
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = None, None, None
UpperCAmelCase__ : Optional[int] = use_timm_backbone
UpperCAmelCase__ : Dict = backbone_config
UpperCAmelCase__ : List[Any] = num_channels
UpperCAmelCase__ : Any = num_queries
UpperCAmelCase__ : int = d_model
UpperCAmelCase__ : Optional[int] = encoder_ffn_dim
UpperCAmelCase__ : str = encoder_layers
UpperCAmelCase__ : Dict = encoder_attention_heads
UpperCAmelCase__ : Optional[Any] = decoder_ffn_dim
UpperCAmelCase__ : Tuple = decoder_layers
UpperCAmelCase__ : Optional[Any] = decoder_attention_heads
UpperCAmelCase__ : List[str] = dropout
UpperCAmelCase__ : Tuple = attention_dropout
UpperCAmelCase__ : List[Any] = activation_dropout
UpperCAmelCase__ : Dict = activation_function
UpperCAmelCase__ : Optional[Any] = init_std
UpperCAmelCase__ : List[str] = init_xavier_std
UpperCAmelCase__ : int = encoder_layerdrop
UpperCAmelCase__ : Tuple = decoder_layerdrop
UpperCAmelCase__ : int = encoder_layers
UpperCAmelCase__ : Dict = auxiliary_loss
UpperCAmelCase__ : Union[str, Any] = position_embedding_type
UpperCAmelCase__ : List[str] = backbone
UpperCAmelCase__ : List[Any] = use_pretrained_backbone
UpperCAmelCase__ : List[str] = dilation
# Hungarian matcher
UpperCAmelCase__ : Dict = class_cost
UpperCAmelCase__ : Any = bbox_cost
UpperCAmelCase__ : Tuple = giou_cost
# Loss coefficients
UpperCAmelCase__ : Any = mask_loss_coefficient
UpperCAmelCase__ : Dict = dice_loss_coefficient
UpperCAmelCase__ : Any = bbox_loss_coefficient
UpperCAmelCase__ : Tuple = giou_loss_coefficient
UpperCAmelCase__ : List[Any] = eos_coefficient
super().__init__(is_encoder_decoder=__UpperCamelCase , **__UpperCamelCase )
@property
def lowerCAmelCase__ ( self )-> int:
return self.encoder_attention_heads
@property
def lowerCAmelCase__ ( self )-> int:
return self.d_model
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = version.parse('1.11' )
@property
def lowerCAmelCase__ ( self )-> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def lowerCAmelCase__ ( self )-> float:
return 1E-5
@property
def lowerCAmelCase__ ( self )-> int:
return 12
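# Editorial note: in the OnnxConfig subclass above, 1.11 is the minimum torch
# version supported for export, 1e-5 the absolute tolerance used when
# validating exported outputs against PyTorch, and 12 the default ONNX opset.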
| 660 | 0 |
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file , repo_path="shi-labs/oneformer_demo"):
    '''simple docstring'''
    with open(hf_hub_download(repo_path , class_info_file , repo_type="dataset") , "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
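# Offline sketch (assumption: a hand-made two-class payload standing in for the real
# ADE20K JSON) showing the metadata layout `prepare_metadata` returns.
_demo_class_info = {"0": {"name": "wall", "isthing": 0}, "1": {"name": "person", "isthing": 1}}
_demo_metadata = {key: info["name"] for key, info in _demo_class_info.items()}
_demo_metadata["thing_ids"] = [int(key) for key, info in _demo_class_info.items() if info["isthing"]]
_demo_metadata["class_names"] = [info["name"] for info in _demo_class_info.values()]
assert _demo_metadata["thing_ids"] == [1]
assert _demo_metadata["class_names"] == ["wall", "person"]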
class OneFormerImageProcessorTester ( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=4_00 , size=None , do_resize=True , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , num_labels=10 , do_reduce_labels=False , ignore_index=2_55 , repo_path="shi-labs/oneformer_demo" , class_info_file="ade20k_panoptic.json" , num_text=10 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 13_33} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file , repo_path )
        self.num_text = num_text
        self.repo_path = repo_path
        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values( self , image_inputs , batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            # mimic shortest-edge resizing: scale so the shorter side hits size["shortest_edge"]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w )
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h )
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
    def get_fake_oneformer_outputs( self ):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class _lowercase ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class
    def setUp( self ):
        self.image_processing_tester = OneFormerImageProcessorTester(self )
@property
    def image_processor_dict( self ):
return self.image_processing_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , "image_mean" ) )
self.assertTrue(hasattr(_a , "image_std" ) )
self.assertTrue(hasattr(_a , "do_normalize" ) )
self.assertTrue(hasattr(_a , "do_resize" ) )
self.assertTrue(hasattr(_a , "size" ) )
self.assertTrue(hasattr(_a , "ignore_index" ) )
self.assertTrue(hasattr(_a , "class_info_file" ) )
self.assertTrue(hasattr(_a , "num_text" ) )
self.assertTrue(hasattr(_a , "repo_path" ) )
self.assertTrue(hasattr(_a , "metadata" ) )
self.assertTrue(hasattr(_a , "do_reduce_labels" ) )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
pass
def lowerCAmelCase__ ( self )-> Optional[Any]:
# Initialize image_processor
UpperCAmelCase__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : Tuple = prepare_image_inputs(self.image_processing_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
UpperCAmelCase__ : int = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.image_processing_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ , UpperCAmelCase__ : str = self.image_processing_tester.get_expected_values(_a , batched=_a )
UpperCAmelCase__ : str = image_processor(
_a , ["semantic"] * len(_a ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self )-> Tuple:
# Initialize image_processor
UpperCAmelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase__ : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
UpperCAmelCase__ : Optional[int] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.image_processing_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.image_processing_tester.get_expected_values(_a , batched=_a )
UpperCAmelCase__ : str = image_processor(
_a , ["semantic"] * len(_a ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self )-> Optional[int]:
# Initialize image_processor
UpperCAmelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
UpperCAmelCase__ : int = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.image_processing_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.image_processing_tester.get_expected_values(_a , batched=_a )
UpperCAmelCase__ : Optional[Any] = image_processor(
_a , ["semantic"] * len(_a ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
    def comm_get_image_processor_inputs( self , with_segmentation_maps=False , is_instance_map=False , segmentation_type="np" ):
UpperCAmelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
UpperCAmelCase__ : List[str] = self.image_processing_tester.num_labels
UpperCAmelCase__ : List[str] = None
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : List[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=_a )
if with_segmentation_maps:
UpperCAmelCase__ : int = num_labels
if is_instance_map:
UpperCAmelCase__ : Optional[Any] = list(range(_a ) ) * 2
UpperCAmelCase__ : Union[str, Any] = dict(enumerate(_a ) )
UpperCAmelCase__ : Union[str, Any] = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
UpperCAmelCase__ : str = [Image.fromarray(_a ) for annotation in annotations]
UpperCAmelCase__ : Tuple = image_processor(
_a , ["semantic"] * len(_a ) , _a , return_tensors="pt" , instance_id_to_semantic_id=_a , pad_and_return_pixel_mask=_a , )
return inputs
def lowerCAmelCase__ ( self )-> int:
pass
def lowerCAmelCase__ ( self )-> Any:
def common(__UpperCamelCase=False , __UpperCamelCase=None ):
UpperCAmelCase__ : Union[str, Any] = self.comm_get_image_processor_inputs(
with_segmentation_maps=_a , is_instance_map=_a , segmentation_type=_a )
UpperCAmelCase__ : str = inputs["mask_labels"]
UpperCAmelCase__ : List[str] = inputs["class_labels"]
UpperCAmelCase__ : Any = inputs["pixel_values"]
UpperCAmelCase__ : Dict = inputs["text_inputs"]
# check the batch_size
for mask_label, class_label, text_input in zip(_a , _a , _a ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(_a ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=_a )
common(is_instance_map=_a , segmentation_type="pil" )
common(is_instance_map=_a , segmentation_type="pil" )
    def test_binary_mask_to_rle( self ):
        fake_binary_mask = np.zeros((20, 50) )
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1
        rle = binary_mask_to_rle(fake_binary_mask )
        self.assertEqual(len(rle ) , 4 )
        self.assertEqual(rle[0] , 21 )
        self.assertEqual(rle[1] , 45 )
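    # `binary_mask_to_rle` emits (start, run-length) pairs over the flattened mask, so
    # the asserts above read as "a run of 45 ones starts at (1-based) pixel 21".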
    def test_post_process_semantic_segmentation( self ):
        feature_extractor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = feature_extractor.post_process_semantic_segmentation(outputs )
        self.assertEqual(len(segmentation ) , self.image_processing_tester.batch_size )
        self.assertEqual(
            segmentation[0].shape , (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ) , )
        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        segmentation = feature_extractor.post_process_semantic_segmentation(outputs , target_sizes=target_sizes )
        self.assertEqual(segmentation[0].shape , target_sizes[0] )
    def test_post_process_instance_segmentation( self ):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs , threshold=0 )
        self.assertTrue(len(segmentation ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue("segmentation" in el )
            self.assertTrue("segments_info" in el )
            self.assertEqual(type(el["segments_info"] ) , list )
            self.assertEqual(
                el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
    def test_post_process_panoptic_segmentation( self ):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs , threshold=0 )
        self.assertTrue(len(segmentation ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue("segmentation" in el )
            self.assertTrue("segments_info" in el )
            self.assertEqual(type(el["segments_info"] ) , list )
            self.assertEqual(
                el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 721 |
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
A__ : int = getLogger(__name__)
def eval_data_dir( data_dir , save_dir: str , model_name: str , bs: int = 8 , max_source_length: int = 1024 , type_path="val" , n_obs=None , fpaa=False , task="summarization" , local_rank=None , num_return_sequences=1 , dataset_kwargs: Dict = None , prefix="" , **generate_kwargs , ):
    '''simple docstring'''
    model_name = str(model_name )
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl" , rank=local_rank )
    save_dir = Path(save_dir )
    save_path = save_dir.joinpath(F"rank_{local_rank}_output.json" )
    torch.cuda.set_device(local_rank )
    model = AutoModelForSeqaSeqLM.from_pretrained(model_name ).cuda()
    if fpaa:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model , task )  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams" , model.config.num_beams )  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" )  # if this is wrong, check config.model_type.
    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config , "prefix" , "" ) or ""
    ds = SeqaSeqDataset(
        tokenizer , data_dir , max_source_length , max_target_length=1024 , type_path=type_path , n_obs=n_obs , prefix=prefix , **dataset_kwargs , )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs , distributed=True , add_extra_examples=False , shuffle=True )
    data_loader = DataLoader(ds , sampler=sampler , batch_size=bs , collate_fn=ds.collate_fn )
    results = []
    for batch in tqdm(data_loader ):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=num_return_sequences , num_beams=num_beams , **generate_kwargs , )
        preds = tokenizer.batch_decode(summaries , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds , num_return_sequences )  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds ):
            results.append({"pred": pred, "id": ids[i].item()} )
    save_json(results , save_path )
    return results, sampler.num_replicas
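# Hedged sketch (assumption: `chunks` from utils splits the flat list of
# batch_size * num_return_sequences decodes into per-example groups, as used above).
def _chunks_sketch(lst, n):
    for i in range(0, len(lst), n):
        yield lst[i : i + n]

assert list(_chunks_sketch(["a", "b", "c", "d"], 2)) == [["a", "b"], ["c", "d"]]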
def run_generate():
'''simple docstring'''
    parser = argparse.ArgumentParser(
epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" )
parser.add_argument("--data_dir" , type=lowerCAmelCase , help="like cnn_dm/test.source" )
parser.add_argument(
"--model_name" , type=lowerCAmelCase , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , )
parser.add_argument("--save_dir" , type=lowerCAmelCase , help="where to save" , default="tmp_gen" )
parser.add_argument("--max_source_length" , type=lowerCAmelCase , default=lowerCAmelCase )
parser.add_argument(
"--type_path" , type=lowerCAmelCase , default="test" , help="which subset to evaluate typically train/val/test" )
parser.add_argument("--task" , type=lowerCAmelCase , default="summarization" , help="used for task_specific_params + metrics" )
parser.add_argument("--bs" , type=lowerCAmelCase , default=8 , required=lowerCAmelCase , help="batch size" )
parser.add_argument(
"--local_rank" , type=lowerCAmelCase , default=-1 , required=lowerCAmelCase , help="should be passed by distributed.launch" )
parser.add_argument(
"--n_obs" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase , help="How many observations. Defaults to all." )
parser.add_argument(
"--num_return_sequences" , type=lowerCAmelCase , default=1 , required=lowerCAmelCase , help="How many sequences to return" )
parser.add_argument(
"--sync_timeout" , type=lowerCAmelCase , default=600 , required=lowerCAmelCase , help="How long should master process wait for other processes to finish." , )
parser.add_argument("--src_lang" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase )
parser.add_argument("--tgt_lang" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase )
parser.add_argument(
"--prefix" , type=lowerCAmelCase , required=lowerCAmelCase , default=lowerCAmelCase , help="will be added to the begininng of src examples" )
parser.add_argument("--fp16" , action="store_true" )
parser.add_argument("--debug" , action="store_true" )
    start_time = time.time()
    args , rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest )
if generate_kwargs and args.local_rank <= 0:
print(F"parsed the following generate kwargs: {generate_kwargs}" )
    json_save_dir = Path(args.save_dir + "_tmp" )
    Path(json_save_dir ).mkdir(exist_ok=True )  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json" ) )
if intermediate_files:
raise ValueError(F"Found files at {json_save_dir} please move or remove them." )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang
    Path(args.save_dir ).mkdir(exist_ok=True )
    results , num_replicas = eval_data_dir(
        args.data_dir , json_save_dir , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=dataset_kwargs , **generate_kwargs , )
    if args.local_rank <= 0:
        save_dir = Path(args.save_dir )
        save_dir.mkdir(exist_ok=True )
        partial_results = gather_results_from_each_node(num_replicas , json_save_dir , args.sync_timeout )
        preds = combine_partial_results(partial_results )
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json" )
            print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" )
            save_json(preds , save_path )
            return
        tgt_file = Path(args.data_dir ).joinpath(args.type_path + ".target" )
        with open(tgt_file ) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds )]
        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics = score_fn(preds , labels )
        metrics["n_obs"] = len(preds )
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"] , 4 )
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" )
        save_json(metrics , metrics_save_path , indent=None )
        print(metrics )
        write_txt_file(preds , save_dir.joinpath(F"{args.type_path}_generations.txt" ) )
        if args.debug:
            write_txt_file(labels , save_dir.joinpath(F"{args.type_path}.target" ) )
        else:
            shutil.rmtree(json_save_dir )
def combine_partial_results( partial_results ):
    '''simple docstring'''
    records = []
    for partial_result in partial_results:
        records.extend(partial_result )
    records = sorted(records , key=lambda x : x["id"] )
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node( num_replicas , save_dir , timeout ):
    '''simple docstring'''
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish" )
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json" ) )
        if len(json_files ) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json , json_files )
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes" )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 660 | 0 |
"""simple docstring"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters( model_a , model_b , did_step , iteration ):
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model( model , input , target , accelerator , do_backward=False ):
    '''simple docstring'''
    model.train()
    output = model(input )
    loss = F.mse_loss(output , target.to(output.device ) )
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss )
def get_training_setup( accelerator , sched=False ):
    '''simple docstring'''
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=80 )
    dataloader = DataLoader(dset , batch_size=16 )
    model.to(accelerator.device )
    if sched:
        opt = AdamW(params=model.parameters() , lr=1E-3 )
        ddp_opt = AdamW(params=ddp_model.parameters() , lr=1E-3 )
        sched = LambdaLR(opt , lr_lambda=lambda epoch : epoch**0.65 )
        ddp_sched = LambdaLR(ddp_opt , lr_lambda=lambda epoch : epoch**0.65 )
    # Make a copy of `model`
    if sched:
        ddp_model , ddp_opt , ddp_sched , dataloader = accelerator.prepare(ddp_model , ddp_opt , ddp_sched , dataloader )
    else:
        ddp_model , dataloader = accelerator.prepare(ddp_model , dataloader )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def a__ ( lowerCAmelCase : Any ):
'''simple docstring'''
# Test when on a single CPU or GPU that the context manager does nothing
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = get_training_setup(_lowerCamelCase )
# Use a single batch
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = next(iter(_lowerCamelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
UpperCAmelCase__ , UpperCAmelCase__ : str = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_lowerCamelCase ):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
# Sync grads
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
UpperCAmelCase__ : Dict = ddp_input[torch.randperm(len(_lowerCamelCase ) )]
def a__ ( lowerCAmelCase : Optional[int] ):
'''simple docstring'''
# Test on distributed setup that context manager behaves properly
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = get_training_setup(_lowerCamelCase )
# Use a single batch
UpperCAmelCase__ , UpperCAmelCase__ : Any = next(iter(_lowerCamelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_lowerCamelCase ):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
# Sync grads
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
UpperCAmelCase__ : Union[str, Any] = ddp_input[torch.randperm(len(_lowerCamelCase ) )]
def a__ ( lowerCAmelCase : int=False , lowerCAmelCase : Optional[int]=False ):
'''simple docstring'''
UpperCAmelCase__ : Dict = Accelerator(
split_batches=_lowerCamelCase , dispatch_batches=_lowerCamelCase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = get_training_setup(_lowerCamelCase )
for iteration, batch in enumerate(_lowerCamelCase ):
UpperCAmelCase__ , UpperCAmelCase__ : int = batch.values()
# Gather the distributed inputs and targs for the base model
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(_lowerCamelCase ):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(_lowerCamelCase ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
UpperCAmelCase__ : Dict = ddp_input[torch.randperm(len(_lowerCamelCase ) )]
GradientState._reset_state()
def a__ ( lowerCAmelCase : Tuple=False , lowerCAmelCase : Any=False ):
'''simple docstring'''
UpperCAmelCase__ : Any = Accelerator(
split_batches=_lowerCamelCase , dispatch_batches=_lowerCamelCase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = get_training_setup(_lowerCamelCase , _lowerCamelCase )
for iteration, batch in enumerate(_lowerCamelCase ):
UpperCAmelCase__ , UpperCAmelCase__ : Dict = batch.values()
# Gather the distributed inputs and targs for the base model
UpperCAmelCase__ , UpperCAmelCase__ : Dict = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase__ , UpperCAmelCase__ : int = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_lowerCamelCase )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(_lowerCamelCase ):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
UpperCAmelCase__ : str = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_lowerCamelCase ))
if accelerator.num_processes > 1:
check_model_parameters(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = Accelerator()
UpperCAmelCase__ : Optional[int] = RegressionDataset(length=80 )
UpperCAmelCase__ : int = DataLoader(_lowerCamelCase , batch_size=16 )
UpperCAmelCase__ : str = RegressionDataset(length=96 )
UpperCAmelCase__ : str = DataLoader(_lowerCamelCase , batch_size=16 )
UpperCAmelCase__ , UpperCAmelCase__ : Dict = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(_lowerCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(_lowerCamelCase )
if iteration < len(_lowerCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(_lowerCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(_lowerCamelCase )
if batch_num < len(_lowerCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = Accelerator()
UpperCAmelCase__ : Union[str, Any] = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(_lowerCamelCase )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(_lowerCamelCase )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , F"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , )
test_gradient_accumulation(_lowerCamelCase , _lowerCamelCase )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , )
test_gradient_accumulation_with_opt_and_scheduler(_lowerCamelCase , _lowerCamelCase )
def a__ ( lowerCAmelCase : str ):
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
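# Aside (plain PyTorch, illustrative only): the invariant the tests above enforce is
# that accumulating per-micro-batch grads of loss/N matches one full-batch step.
_w = torch.ones(1, requires_grad=True)
for _x in (torch.tensor([1.0]), torch.tensor([3.0])):
    ((_w * _x).sum() / 2).backward()  # accumulate grads across two micro-batches
assert torch.isclose(_w.grad, torch.tensor([2.0])).item()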
| 700 |
"""simple docstring"""
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    '''simple docstring'''
    if number < 0:
        raise ValueError("the value of input must not be negative" )
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    '''simple docstring'''
    if number < 0:
        raise ValueError("the value of input must not be negative" )
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    '''simple docstring'''
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(F"Benchmark when {number = }:" )
        print(F"{get_set_bits_count_using_modulo_operator(number) = }" )
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=setup )
        print(F"timeit() runs in {timing} seconds" )
        print(F"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }" )
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=setup , )
        print(F"timeit() runs in {timing} seconds" )
    for number in (25, 37, 58, 0):
        do_benchmark(number )
        print()
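# Cross-check (pure stdlib): bin(n).count("1") must agree with both implementations.
assert all(
    bin(n).count("1") == get_set_bits_count_using_brian_kernighans_algorithm(n)
    for n in (0, 1, 25, 37, 58)
)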
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 660 | 0 |
"""simple docstring"""
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests ):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    return sum(
        int("".join(map(str , num ) ) )
        for num in permutations(range(n ) )
        if is_substring_divisible(num ) )
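# Known example from the puzzle statement: the pandigital 1406357289 passes every
# divisibility rule (357/7, 572/11, 728/13 and 289/17 are all exact).
assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))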
if __name__ == "__main__":
print(f"""{solution() = }""")
| 701 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _lowercase ( unittest.TestCase , ToolTesterMixin ):
'''simple docstring'''
    def setUp( self ):
        self.tool = load_tool("text-classification" )
        self.tool.setup()
        self.remote_tool = load_tool("text-classification" , remote=True )
    def test_exact_match_arg( self ):
        result = self.tool("That's quite cool" , ["positive", "negative"] )
        self.assertEqual(result , "positive" )
    def test_exact_match_arg_remote( self ):
        result = self.remote_tool("That's quite cool" , ["positive", "negative"] )
        self.assertEqual(result , "positive" )
    def test_exact_match_kwarg( self ):
        result = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
        self.assertEqual(result , "positive" )
    def test_exact_match_kwarg_remote( self ):
        result = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
        self.assertEqual(result , "positive" )
| 660 | 0 |
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass ( KwargsHandler ):
    '''simple docstring'''
    a : int = 0
    b : bool = False
    c : float = 3.0
class _lowercase ( unittest.TestCase ):
    '''simple docstring'''
    def test_kwargs_handler( self ):
        self.assertDictEqual(MockClass().to_kwargs() , {} )
        self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"a": 2} )
        self.assertDictEqual(MockClass(a=2 , b=True ).to_kwargs() , {"a": 2, "b": True} )
        self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"a": 2, "c": 2.25} )
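    # `to_kwargs` reports only the fields that differ from the dataclass defaults,
    # which is why the zero-argument instance above serializes to an empty dict.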
@require_cuda
    def test_grad_scaler_kwargs( self ):
        scaler_handler = GradScalerKwargs(init_scale=10_24 , growth_factor=2 )
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16" , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fpaa )
        scaler = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale , 1024.0 )
        self.assertEqual(scaler._growth_factor , 2.0 )
        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor , 0.5 )
        self.assertEqual(scaler._growth_interval , 20_00 )
        self.assertEqual(scaler._enabled , True )
@require_multi_gpu
    def test_ddp_kwargs( self ):
        cmd = ["torchrun", F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
        execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1_024 * 1_024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 702 |
"""simple docstring"""
def manhattan_distance(point_a: list , point_b: list ) -> float:
    '''simple docstring'''
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError("Both points must be in the same n-dimensional space" )
    return float(sum(abs(a - b ) for a, b in zip(point_a , point_b ) ) )


def _validate_point(point: list[float] ) -> None:
    '''simple docstring'''
    if point:
        if isinstance(point , list ):
            for item in point:
                if not isinstance(item , (int, float) ):
                    msg = (
                        "Expected a list of numbers as input, found "
                        F"{type(item ).__name__}"
                    )
                    raise TypeError(msg )
        else:
            msg = F"Expected a list of numbers as input, found {type(point ).__name__}"
            raise TypeError(msg )
    else:
        raise ValueError("Missing an input" )


def manhattan_distance_one_liner(point_a: list , point_b: list ) -> float:
    '''simple docstring'''
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError("Both points must be in the same n-dimensional space" )
    return float(sum(abs(x - y ) for x, y in zip(point_a , point_b ) ) )
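# Worked check (3-D points; the second variant's name is reconstructed):
# |1-4| + |2-6| + |3-8| = 3 + 4 + 5 = 12.
assert manhattan_distance([1, 2, 3], [4, 6, 8]) == 12.0
assert manhattan_distance_one_liner([1, 2, 3], [4, 6, 8]) == 12.0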
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 | 0 |
def solution(n: int = 100_0000 ) -> int:
    '''simple docstring'''
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for inputa in range(2 , n ):
        counter = 0
        number = inputa
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if inputa not in counters:
            counters[inputa] = counter
        if counter > pre_counter:
            largest_number = inputa
            pre_counter = counter
    return largest_number
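# Quick sanity check (well-known small case): below 100 the longest Collatz chain
# starts at 97.
assert solution(100) == 97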
if __name__ == "__main__":
print(solution(int(input().strip())))
| 703 |
"""simple docstring"""
import math
def is_prime(number: int ) -> bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 1_0001 ) -> int:
    '''simple docstring'''
    try:
        nth = int(nth )
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int." ) from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one." )
    primes: list[int] = []
    num = 2
    while len(primes ) < nth:
        if is_prime(num ):
            primes.append(num )
            num += 1
        else:
            num += 1
    return primes[len(primes ) - 1]
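# Spot checks: 91 = 7 * 13 trips the 6k±1 trial division, and the 6th prime is 13.
assert is_prime(29) and not is_prime(91)
assert solution(6) == 13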
if __name__ == "__main__":
print(f"""{solution() = }""")
| 660 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Any = logging.get_logger(__name__)
A__ : Tuple = {
"""microsoft/swinv2-tiny-patch4-window8-256""": (
"""https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"""
),
}
class _lowercase ( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''swinv2'''
    attribute_map = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }
def __init__( self , __UpperCamelCase=2_24 , __UpperCamelCase=4 , __UpperCamelCase=3 , __UpperCamelCase=96 , __UpperCamelCase=[2, 2, 6, 2] , __UpperCamelCase=[3, 6, 12, 24] , __UpperCamelCase=7 , __UpperCamelCase=4.0 , __UpperCamelCase=True , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.1 , __UpperCamelCase="gelu" , __UpperCamelCase=False , __UpperCamelCase=0.02 , __UpperCamelCase=1E-5 , __UpperCamelCase=32 , **__UpperCamelCase , )-> Union[str, Any]:
super().__init__(**__UpperCamelCase )
UpperCAmelCase__ : str = image_size
UpperCAmelCase__ : Dict = patch_size
UpperCAmelCase__ : int = num_channels
UpperCAmelCase__ : Any = embed_dim
UpperCAmelCase__ : int = depths
UpperCAmelCase__ : List[Any] = len(__UpperCamelCase )
UpperCAmelCase__ : Any = num_heads
UpperCAmelCase__ : Optional[int] = window_size
UpperCAmelCase__ : Dict = mlp_ratio
UpperCAmelCase__ : Any = qkv_bias
UpperCAmelCase__ : List[str] = hidden_dropout_prob
UpperCAmelCase__ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase__ : List[str] = drop_path_rate
UpperCAmelCase__ : int = hidden_act
UpperCAmelCase__ : int = use_absolute_embeddings
UpperCAmelCase__ : Any = layer_norm_eps
UpperCAmelCase__ : List[Any] = initializer_range
UpperCAmelCase__ : List[Any] = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCAmelCase__ : int = int(embed_dim * 2 ** (len(__UpperCamelCase ) - 1) )
UpperCAmelCase__ : List[Any] = (0, 0, 0, 0)
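# Worked example (defaults above): embed_dim=96 over len(depths)=4 stages doubles the
# channels each stage, so the derived `hidden_size` is 96 * 2 ** (4 - 1) = 768.
assert 96 * 2 ** (4 - 1) == 768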
| 704 |
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester :
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=30 , __UpperCamelCase=2 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=10 , __UpperCamelCase=0.02 , __UpperCamelCase=3 , __UpperCamelCase=0.6 , __UpperCamelCase=None , )-> List[Any]:
UpperCAmelCase__ : str = parent
UpperCAmelCase__ : Optional[Any] = batch_size
UpperCAmelCase__ : Any = image_size
UpperCAmelCase__ : Dict = patch_size
UpperCAmelCase__ : Optional[Any] = num_channels
UpperCAmelCase__ : Union[str, Any] = is_training
UpperCAmelCase__ : Any = use_labels
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : Dict = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : List[Any] = intermediate_size
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : List[Any] = hidden_dropout_prob
UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[Any] = type_sequence_label_size
UpperCAmelCase__ : Union[str, Any] = initializer_range
UpperCAmelCase__ : int = mask_ratio
UpperCAmelCase__ : Tuple = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCAmelCase__ : int = (image_size // patch_size) ** 2
UpperCAmelCase__ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
    def prepare_config_and_inputs( self ):
UpperCAmelCase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Optional[Any] = None
if self.use_labels:
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Optional[int] = self.get_config()
return config, pixel_values, labels
    def get_config( self ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
    def create_and_check_model( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : Optional[Any] = ViTMAEModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_pretraining( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : List[Any] = ViTMAEForPreTraining(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : Optional[int] = model(__UpperCamelCase )
UpperCAmelCase__ : List[str] = (self.image_size // self.patch_size) ** 2
UpperCAmelCase__ : List[str] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCAmelCase__ : Dict = 1
UpperCAmelCase__ : str = ViTMAEForPreTraining(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
UpperCAmelCase__ : List[str] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
    def prepare_config_and_inputs_for_common( self ):
UpperCAmelCase__ : Tuple = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = config_and_inputs
UpperCAmelCase__ : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = ViTMAEModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTMAEConfig , has_text_modality=False , hidden_size=37 )
def lowerCAmelCase__ ( self )-> int:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def lowerCAmelCase__ ( self )-> Dict:
pass
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[str] = model_class(__UpperCamelCase )
UpperCAmelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Dict = [*signature.parameters.keys()]
UpperCAmelCase__ : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCamelCase )
    def check_pt_tf_models( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
# make masks reproducible
np.random.seed(2 )
UpperCAmelCase__ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
UpperCAmelCase__ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase__ : str = torch.from_numpy(__UpperCamelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCAmelCase__ : Optional[Any] = pt_noise
super().check_pt_tf_models(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[Any] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
UpperCAmelCase__ : Optional[Any] = outputs[0].cpu().numpy()
UpperCAmelCase__ : Union[str, Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = model_class.from_pretrained(__UpperCamelCase )
model.to(__UpperCamelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : List[Any] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
# Make sure we don't have nans
                out_after = after_outputs[0].cpu().numpy()
                out_after[np.isnan(out_after)] = 0
                max_diff = np.amax(np.abs(out_before - out_after ) )
                self.assertLessEqual(max_diff , 1E-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> List[str]:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> Any:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> Optional[Any]:
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def lowerCAmelCase__ ( self )-> List[Any]:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
pass
@slow
def lowerCAmelCase__ ( self )-> Union[str, Any]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Tuple = ViTMAEModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase__ ( self )-> List[Any]:
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self )-> Optional[int]:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
UpperCAmelCase__ : Any = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(__UpperCamelCase )
UpperCAmelCase__ : Tuple = self.default_image_processor
UpperCAmelCase__ : List[Any] = prepare_img()
UpperCAmelCase__ : Optional[Any] = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCAmelCase__ : List[Any] = ViTMAEConfig()
UpperCAmelCase__ : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCAmelCase__ : Optional[int] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : List[str] = model(**__UpperCamelCase , noise=torch.from_numpy(__UpperCamelCase ).to(device=__UpperCamelCase ) )
# verify the logits
UpperCAmelCase__ : str = torch.Size((1, 1_96, 7_68) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(__UpperCamelCase ) , atol=1E-4 ) )
| 660 | 0 |
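# A minimal, self-contained sketch (an assumption, not part of the test file
# above) of why the test seeds NumPy/torch before building the noise: the
# noise tensor decides which patches ViTMAE masks, so two forward passes agree
# only if the noise does. `num_patches` assumes the default 224px/16px config.
import numpy as np
import torch

num_patches = (224 // 16) ** 2  # 196

def make_noise(seed: int) -> torch.Tensor:
    np.random.seed(seed)
    return torch.from_numpy(np.random.uniform(size=(1, num_patches)))

assert torch.equal(make_noise(2), make_noise(2))      # same seed -> same mask
assert not torch.equal(make_noise(2), make_noise(3))  # new seed -> new mask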
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase=7 , __UpperCamelCase=3 , __UpperCamelCase=18 , __UpperCamelCase=30 , __UpperCamelCase=4_00 , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=True , )-> Optional[int]:
UpperCAmelCase__ : List[Any] = size if size is not None else {"height": 18, "width": 18}
UpperCAmelCase__ : Tuple = parent
UpperCAmelCase__ : List[Any] = batch_size
UpperCAmelCase__ : Tuple = num_channels
UpperCAmelCase__ : List[str] = image_size
UpperCAmelCase__ : Union[str, Any] = min_resolution
UpperCAmelCase__ : Optional[Any] = max_resolution
UpperCAmelCase__ : List[Any] = do_resize
UpperCAmelCase__ : str = size
UpperCAmelCase__ : int = do_normalize
def lowerCAmelCase__ ( self )-> Optional[Any]:
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866_4436_3403_3203, 0.6618_8293_6954_4983, 0.3891_7464_0178_6804],
[-0.6042_5591_4688_1104, -0.0_2295_0088_6052_8469, 0.5423_7973_6900_3296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class _lowercase ( __lowercase , unittest.TestCase ):
'''simple docstring'''
_A = ImageGPTImageProcessor if is_vision_available() else None
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ : Any = ImageGPTImageProcessingTester(self )
@property
def lowerCAmelCase__ ( self )-> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , "clusters" ) )
self.assertTrue(hasattr(_A , "do_resize" ) )
self.assertTrue(hasattr(_A , "size" ) )
self.assertTrue(hasattr(_A , "do_normalize" ) )
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
UpperCAmelCase__ : Any = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict )
UpperCAmelCase__ : Tuple = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(_A , obj[key] ) )
else:
self.assertEqual(obj[key] , _A )
def lowerCAmelCase__ ( self )-> int:
UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase__ : Union[str, Any] = os.path.join(_A , "image_processor.json" )
image_processor_first.to_json_file(_A )
UpperCAmelCase__ : Dict = self.image_processing_class.from_json_file(_A ).to_dict()
UpperCAmelCase__ : Optional[int] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(_A , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , _A )
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(_A )
UpperCAmelCase__ : Tuple = self.image_processing_class.from_pretrained(_A ).to_dict()
UpperCAmelCase__ : Tuple = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(_A , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , _A )
@unittest.skip("ImageGPT requires clusters at initialization" )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
pass
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = load_dataset("hf-internal-testing/fixtures_image_utils" , split="test" )
UpperCAmelCase__ : int = Image.open(dataset[4]["file"] )
UpperCAmelCase__ : str = Image.open(dataset[5]["file"] )
UpperCAmelCase__ : Optional[int] = [imagea, imagea]
return images
@require_vision
@require_torch
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ : List[Any] = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small" )
UpperCAmelCase__ : str = prepare_images()
# test non-batched
UpperCAmelCase__ : int = image_processing(images[0] , return_tensors="pt" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 10_24) )
UpperCAmelCase__ : Union[str, Any] = [3_06, 1_91, 1_91]
self.assertEqual(encoding.input_ids[0, :3].tolist() , _A )
# test batched
UpperCAmelCase__ : List[str] = image_processing(_A , return_tensors="pt" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 10_24) )
UpperCAmelCase__ : Dict = [3_03, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , _A )
| 705 |
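# Illustrative sketch (not the library implementation) of what the "clusters"
# tested above are for: ImageGPT-style processors map each normalized RGB
# pixel to the index of its nearest cluster centroid, turning an image into a
# sequence of token ids. The two centroids here are toy values.
import numpy as np

clusters = np.asarray([[0.89, 0.66, 0.39], [-0.60, -0.02, 0.54]])

def pixels_to_ids(pixels: np.ndarray) -> np.ndarray:
    # pixels: (n, 3) in [-1, 1]; squared distance to every centroid, then argmin
    d = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
    return d.argmin(axis=1)

print(pixels_to_ids(np.array([[0.9, 0.7, 0.4], [-0.5, 0.0, 0.5]])))  # [0 1]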
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _lowercase :
'''simple docstring'''
_A = 42
# setable values
_A = 42
_A = 42
_A = None
@classmethod
def lowerCAmelCase__ ( cls , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
return cls(common=__UpperCamelCase , init_noise_sigma=__UpperCamelCase , timesteps=__UpperCamelCase )
@dataclass
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
_A = [e.name for e in FlaxKarrasDiffusionSchedulers]
_A = 42
@property
def lowerCAmelCase__ ( self )-> Optional[int]:
return True
@register_to_config
def __init__( self , __UpperCamelCase = 10_00 , __UpperCamelCase = 0.0001 , __UpperCamelCase = 0.02 , __UpperCamelCase = "linear" , __UpperCamelCase = None , __UpperCamelCase = "fixed_small" , __UpperCamelCase = True , __UpperCamelCase = "epsilon" , __UpperCamelCase = jnp.floataa , )-> List[str]:
UpperCAmelCase__ : int = dtype
def lowerCAmelCase__ ( self , __UpperCamelCase = None )-> DDPMSchedulerState:
if common is None:
UpperCAmelCase__ : int = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
UpperCAmelCase__ : Tuple = jnp.array(1.0 , dtype=self.dtype )
UpperCAmelCase__ : Tuple = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__UpperCamelCase , init_noise_sigma=__UpperCamelCase , timesteps=__UpperCamelCase , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None )-> jnp.ndarray:
return sample
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = () )-> DDPMSchedulerState:
UpperCAmelCase__ : Dict = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
UpperCAmelCase__ : Optional[int] = (jnp.arange(0 , __UpperCamelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__UpperCamelCase , timesteps=__UpperCamelCase , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None )-> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCAmelCase__ : Any = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
UpperCAmelCase__ : Union[str, Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
UpperCAmelCase__ : Dict = jnp.clip(__UpperCamelCase , a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
UpperCAmelCase__ : Tuple = jnp.log(jnp.clip(__UpperCamelCase , a_min=1E-20 ) )
elif variance_type == "fixed_large":
UpperCAmelCase__ : Union[str, Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
UpperCAmelCase__ : Optional[int] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
UpperCAmelCase__ : List[str] = variance
UpperCAmelCase__ : Union[str, Any] = state.common.betas[t]
UpperCAmelCase__ : Optional[int] = (predicted_variance + 1) / 2
UpperCAmelCase__ : Any = frac * max_log + (1 - frac) * min_log
return variance
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = True , )-> Union[FlaxDDPMSchedulerOutput, Tuple]:
UpperCAmelCase__ : List[str] = timestep
if key is None:
UpperCAmelCase__ : int = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = jnp.split(__UpperCamelCase , sample.shape[1] , axis=1 )
else:
UpperCAmelCase__ : Optional[Any] = None
# 1. compute alphas, betas
UpperCAmelCase__ : Union[str, Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
UpperCAmelCase__ : Union[str, Any] = 1 - alpha_prod_t
UpperCAmelCase__ : Tuple = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCAmelCase__ : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCAmelCase__ : Any = model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
                raise ValueError(
                    F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                    " or `v_prediction` for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCAmelCase__ : List[Any] = jnp.clip(__UpperCamelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : List[str] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
UpperCAmelCase__ : List[Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : Tuple = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
UpperCAmelCase__ : Any = jax.random.split(__UpperCamelCase , num=1 )
UpperCAmelCase__ : int = jax.random.normal(__UpperCamelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__UpperCamelCase , __UpperCamelCase , predicted_variance=__UpperCamelCase ) ** 0.5) * noise
UpperCAmelCase__ : Dict = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
UpperCAmelCase__ : Any = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__UpperCamelCase , state=__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> jnp.ndarray:
return add_noise_common(state.common , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> jnp.ndarray:
return get_velocity_common(state.common , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def __len__( self )-> Tuple:
return self.config.num_train_timesteps
| 660 | 0 |
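# NumPy sketch of the posterior coefficients computed in the scheduler's step
# above (formula (7) of https://arxiv.org/pdf/2006.11239.pdf). The linear beta
# schedule below is illustrative, not read from a real config.
import numpy as np

betas = np.linspace(1e-4, 0.02, 1000)
alphas = 1.0 - betas
alphas_cumprod = np.cumprod(alphas)

def posterior_coeffs(t: int):
    a_t = alphas_cumprod[t]
    a_prev = alphas_cumprod[t - 1] if t > 0 else 1.0
    beta_prod_t = 1.0 - a_t
    coeff_x0 = (a_prev ** 0.5 * betas[t]) / beta_prod_t          # weight on predicted x_0
    coeff_xt = alphas[t] ** 0.5 * (1.0 - a_prev) / beta_prod_t   # weight on current x_t
    return coeff_x0, coeff_xt

c0, ct = posterior_coeffs(500)
print(c0, ct, c0 + ct)  # the weights form an approximately convex combination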
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=99 , __UpperCamelCase=32 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=5_12 , __UpperCamelCase=16 , __UpperCamelCase=2 , __UpperCamelCase=0.02 , __UpperCamelCase=3 , __UpperCamelCase=4 , __UpperCamelCase=None , )-> List[str]:
UpperCAmelCase__ : Union[str, Any] = parent
UpperCAmelCase__ : List[str] = batch_size
UpperCAmelCase__ : Any = seq_length
UpperCAmelCase__ : List[Any] = is_training
UpperCAmelCase__ : Any = use_input_mask
UpperCAmelCase__ : Optional[int] = use_token_type_ids
UpperCAmelCase__ : str = use_labels
UpperCAmelCase__ : str = vocab_size
UpperCAmelCase__ : str = hidden_size
UpperCAmelCase__ : Dict = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : List[str] = intermediate_size
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : Tuple = hidden_dropout_prob
UpperCAmelCase__ : Any = attention_probs_dropout_prob
UpperCAmelCase__ : Union[str, Any] = max_position_embeddings
UpperCAmelCase__ : str = type_vocab_size
UpperCAmelCase__ : str = type_sequence_label_size
UpperCAmelCase__ : Dict = initializer_range
UpperCAmelCase__ : Optional[Any] = num_labels
UpperCAmelCase__ : Dict = num_choices
UpperCAmelCase__ : Any = scope
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : Optional[int] = None
if self.use_input_mask:
UpperCAmelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ : Tuple = None
if self.use_token_type_ids:
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : List[str] = None
UpperCAmelCase__ : Optional[Any] = None
if self.use_labels:
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase__ ( self )-> List[Any]:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , use_stable_embedding=snake_case__ , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> int:
UpperCAmelCase__ : Any = OpenLlamaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : List[Any] = model(snake_case__ , attention_mask=snake_case__ )
UpperCAmelCase__ : List[str] = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> str:
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : Tuple = OpenLlamaModel(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : int = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , )
UpperCAmelCase__ : Tuple = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , )
UpperCAmelCase__ : Optional[int] = model(snake_case__ , attention_mask=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> int:
UpperCAmelCase__ : Tuple = OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : str = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> Tuple:
UpperCAmelCase__ : int = True
UpperCAmelCase__ : Optional[Any] = True
UpperCAmelCase__ : Tuple = OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# first forward pass
UpperCAmelCase__ : Tuple = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , use_cache=snake_case__ , )
UpperCAmelCase__ : Optional[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase__ : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase__ : Any = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCAmelCase__ : int = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase__ : Tuple = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase__ : Tuple = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , output_hidden_states=snake_case__ , )["hidden_states"][0]
UpperCAmelCase__ : str = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , past_key_values=snake_case__ , output_hidden_states=snake_case__ , )["hidden_states"][0]
# select random slice
UpperCAmelCase__ : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase__ : int = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase__ : List[str] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) )
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : str = self.prepare_config_and_inputs()
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _lowercase ( __a , __a , __a , unittest.TestCase ):
'''simple docstring'''
_A = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
_A = (OpenLlamaForCausalLM,) if is_torch_available() else ()
_A = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_A = False
_A = False
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : Dict = OpenLlamaModelTester(self )
UpperCAmelCase__ : Tuple = ConfigTester(self , config_class=snake_case__ , hidden_size=37 )
def lowerCAmelCase__ ( self )-> List[Any]:
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase__ : Union[str, Any] = type
self.model_tester.create_and_check_model(*snake_case__ )
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Any = 3
UpperCAmelCase__ : List[Any] = input_dict["input_ids"]
UpperCAmelCase__ : Optional[int] = input_ids.ne(1 ).to(snake_case__ )
UpperCAmelCase__ : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase__ : Any = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : str = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Optional[Any] = 3
UpperCAmelCase__ : str = "single_label_classification"
UpperCAmelCase__ : Union[str, Any] = input_dict["input_ids"]
UpperCAmelCase__ : List[str] = input_ids.ne(1 ).to(snake_case__ )
UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase__ : Union[str, Any] = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : List[Any] = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Dict = 3
UpperCAmelCase__ : str = "multi_label_classification"
UpperCAmelCase__ : str = input_dict["input_ids"]
UpperCAmelCase__ : Optional[int] = input_ids.ne(1 ).to(snake_case__ )
UpperCAmelCase__ : Tuple = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase__ : List[str] = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("Open-Llama buffers include complex numbers, which breaks this test" )
def lowerCAmelCase__ ( self )-> str:
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[str]:
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Optional[Any] = ids_tensor([1, 10] , config.vocab_size )
UpperCAmelCase__ : Optional[int] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase__ : List[str] = OpenLlamaModel(snake_case__ )
original_model.to(snake_case__ )
original_model.eval()
UpperCAmelCase__ : Optional[int] = original_model(snake_case__ ).last_hidden_state
UpperCAmelCase__ : List[Any] = original_model(snake_case__ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase__ : int = {"type": scaling_type, "factor": 10.0}
UpperCAmelCase__ : int = OpenLlamaModel(snake_case__ )
scaled_model.to(snake_case__ )
scaled_model.eval()
UpperCAmelCase__ : Optional[Any] = scaled_model(snake_case__ ).last_hidden_state
UpperCAmelCase__ : Any = scaled_model(snake_case__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
| 706 |
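# Sketch (an assumption about the mechanism, not the model code) of the RoPE
# scaling the parameterized test above exercises: "linear" scaling divides
# position indices by the factor before computing rotary angles, so even a
# short input produces different outputs, while "dynamic" NTK scaling leaves
# inputs shorter than the original context window untouched.
import numpy as np

def rope_angles(positions: np.ndarray, dim: int = 8, base: float = 10000.0, factor: float = 1.0) -> np.ndarray:
    inv_freq = 1.0 / base ** (np.arange(0, dim, 2) / dim)
    return np.outer(positions / factor, inv_freq)  # (seq_len, dim // 2)

short = np.arange(10)
print(np.allclose(rope_angles(short), rope_angles(short, factor=10.0)))  # False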
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ''
_A = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> str:
super().__init__(self , **__UpperCamelCase )
UpperCAmelCase__ : int = repo_info
UpperCAmelCase__ : Optional[int] = token
UpperCAmelCase__ : Optional[Any] = None
def lowerCAmelCase__ ( self )-> Optional[Any]:
if self.dir_cache is None:
UpperCAmelCase__ : str = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
UpperCAmelCase__ : str = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(__UpperCamelCase ): {"name": str(__UpperCamelCase ), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = "rb" , **__UpperCamelCase , )-> List[Any]:
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
UpperCAmelCase__ : Union[str, Any] = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
def lowerCAmelCase__ ( self , __UpperCamelCase , **__UpperCamelCase )-> List[str]:
self._get_dirs()
UpperCAmelCase__ : Union[str, Any] = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=False , **__UpperCamelCase )-> str:
self._get_dirs()
UpperCAmelCase__ : str = PurePosixPath(path.strip("/" ) )
UpperCAmelCase__ : Optional[Any] = {}
for p, f in self.dir_cache.items():
UpperCAmelCase__ : Optional[int] = PurePosixPath(p.strip("/" ) )
UpperCAmelCase__ : Dict = p.parent
if root == path:
UpperCAmelCase__ : Tuple = f
UpperCAmelCase__ : List[Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out )
| 660 | 0 |
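# Self-contained sketch of the directory-cache trick in the filesystem class
# above: every parent of each repo file path is registered as a synthetic
# directory entry so that `ls`/`info` can answer for directories too.
from pathlib import PurePosixPath

siblings = ["data/train/part-0.parquet", "README.md"]  # toy repo listing
cache = {}
for name in siblings:
    cache[name] = {"name": name, "size": None, "type": "file"}
    for d in list(PurePosixPath(name).parents)[:-1]:  # [:-1] drops the root "."
        cache[str(d)] = {"name": str(d), "size": None, "type": "directory"}

print(sorted(cache))
# ['README.md', 'data', 'data/train', 'data/train/part-0.parquet']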
A__ : List[str] = {
"""joule""": 1.0,
"""kilojoule""": 1_000,
"""megajoule""": 1_000_000,
"""gigajoule""": 1_000_000_000,
"""wattsecond""": 1.0,
"""watthour""": 3_600,
"""kilowatthour""": 3_600_000,
"""newtonmeter""": 1.0,
"""calorie_nutr""": 4_186.8,
"""kilocalorie_nutr""": 4_186_800.00,
"""electronvolt""": 1.602_176_634e-19,
"""britishthermalunit_it""": 1_055.05_585,
"""footpound""": 1.35_5818,
}
def a__ ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Any ):
'''simple docstring'''
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
UpperCAmelCase__ : List[Any] = (
F"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
F"Valid values are: {', '.join(lowerCAmelCase_ )}"
)
raise ValueError(lowerCAmelCase_ )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707 |
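# Usage sketch for the conversion above. The masked function name is not
# callable as written, so the arithmetic is inlined here: every unit is
# expressed in joules, and a conversion is value * factor[from] / factor[to].
ENERGY = {"joule": 1.0, "kilowatthour": 3_600_000}
value, from_type, to_type = 7_200_000, "joule", "kilowatthour"
print(value * ENERGY[from_type] / ENERGY[to_type])  # 2.0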
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
A__ : Dict = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
if isinstance(lowerCAmelCase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(lowerCAmelCase , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(lowerCAmelCase ):
return [[videos]]
raise ValueError(F"Could not make batched video from {videos}" )
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ['pixel_values']
def __init__( self , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = 1 / 2_55 , __UpperCamelCase = True , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> None:
super().__init__(**__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = size if size is not None else {"shortest_edge": 2_56}
UpperCAmelCase__ : List[Any] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
UpperCAmelCase__ : List[str] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
UpperCAmelCase__ : int = get_size_dict(__UpperCamelCase , param_name="crop_size" )
UpperCAmelCase__ : Dict = do_resize
UpperCAmelCase__ : Optional[int] = size
UpperCAmelCase__ : List[Any] = do_center_crop
UpperCAmelCase__ : str = crop_size
UpperCAmelCase__ : Optional[int] = resample
UpperCAmelCase__ : int = do_rescale
UpperCAmelCase__ : Union[str, Any] = rescale_factor
UpperCAmelCase__ : Union[str, Any] = offset
UpperCAmelCase__ : Dict = do_normalize
UpperCAmelCase__ : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase__ : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
UpperCAmelCase__ : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
if "shortest_edge" in size:
UpperCAmelCase__ : Union[str, Any] = get_resize_output_image_size(__UpperCamelCase , size["shortest_edge"] , default_to_square=__UpperCamelCase )
elif "height" in size and "width" in size:
UpperCAmelCase__ : Any = (size["height"], size["width"])
else:
raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
UpperCAmelCase__ : Optional[Any] = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(__UpperCamelCase , size=(size["height"], size["width"]) , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = True , __UpperCamelCase = None , **__UpperCamelCase , )-> Tuple:
UpperCAmelCase__ : str = image.astype(np.floataa )
if offset:
UpperCAmelCase__ : Tuple = image - (scale / 2)
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , )-> np.ndarray:
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
UpperCAmelCase__ : Optional[Any] = to_numpy_array(__UpperCamelCase )
if do_resize:
UpperCAmelCase__ : Union[str, Any] = self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase )
if do_center_crop:
UpperCAmelCase__ : int = self.center_crop(__UpperCamelCase , size=__UpperCamelCase )
if do_rescale:
UpperCAmelCase__ : List[str] = self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase , offset=__UpperCamelCase )
if do_normalize:
UpperCAmelCase__ : List[Any] = self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase )
UpperCAmelCase__ : Dict = to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase )
return image
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , **__UpperCamelCase , )-> PIL.Image.Image:
UpperCAmelCase__ : Optional[int] = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ : int = resample if resample is not None else self.resample
UpperCAmelCase__ : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ : int = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : Optional[int] = offset if offset is not None else self.offset
UpperCAmelCase__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ : Dict = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ : Optional[int] = image_std if image_std is not None else self.image_std
UpperCAmelCase__ : List[str] = size if size is not None else self.size
UpperCAmelCase__ : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
UpperCAmelCase__ : Dict = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ : Tuple = get_size_dict(__UpperCamelCase , param_name="crop_size" )
if not valid_images(__UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase__ : List[str] = make_batched(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = [
[
self._preprocess_image(
image=__UpperCamelCase , do_resize=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , do_center_crop=__UpperCamelCase , crop_size=__UpperCamelCase , do_rescale=__UpperCamelCase , rescale_factor=__UpperCamelCase , offset=__UpperCamelCase , do_normalize=__UpperCamelCase , image_mean=__UpperCamelCase , image_std=__UpperCamelCase , data_format=__UpperCamelCase , )
for img in video
]
for video in videos
]
UpperCAmelCase__ : Dict = {"pixel_values": videos}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
| 660 | 0 |
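# Sketch of the `make_batched` normalization used above, assuming frames are
# NumPy arrays (the real helper also accepts PIL images and framework
# tensors): a single frame, a video (list of frames), or a batch of videos
# all become a list of lists of frames.
import numpy as np

def make_batched(videos):
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)):
        return videos                # already a batch of videos
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], np.ndarray):
        return [videos]              # single video -> batch of one
    if isinstance(videos, np.ndarray):
        return [[videos]]            # single frame -> one one-frame video
    raise ValueError(f"Could not make batched video from {videos}")

frame = np.zeros((2, 2, 3))
print(len(make_batched(frame)), len(make_batched([frame, frame])[0]))  # 1 2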
"""simple docstring"""
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class _lowercase :
'''simple docstring'''
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Optional[Any]:
raise NotImplementedError()
def lowerCAmelCase__ ( self )-> List[str]:
raise NotImplementedError()
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase = False , **__UpperCamelCase )-> List[str]:
UpperCAmelCase__ : str = tokenizer
UpperCAmelCase__ : Optional[int] = skip_prompt
UpperCAmelCase__ : Optional[Any] = decode_kwargs
# variables used in the streaming process
UpperCAmelCase__ : str = []
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : str = True
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[str]:
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError("TextStreamer only supports batch size 1" )
elif len(value.shape ) > 1:
UpperCAmelCase__ : Optional[int] = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
UpperCAmelCase__ : Tuple = False
return
# Add the new token to the cache and decodes the entire thing.
self.token_cache.extend(value.tolist() )
UpperCAmelCase__ : Optional[int] = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith("\n" ):
UpperCAmelCase__ : int = text[self.print_len :]
UpperCAmelCase__ : Tuple = []
UpperCAmelCase__ : Any = 0
# If the last token is a CJK character, we print the characters.
elif len(__UpperCamelCase ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
UpperCAmelCase__ : Optional[int] = text[self.print_len :]
self.print_len += len(__UpperCamelCase )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
UpperCAmelCase__ : Dict = text[self.print_len : text.rfind(" " ) + 1]
self.print_len += len(__UpperCamelCase )
self.on_finalized_text(__UpperCamelCase )
def lowerCAmelCase__ ( self )-> List[str]:
# Flush the cache, if it exists
if len(self.token_cache ) > 0:
UpperCAmelCase__ : Dict = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
UpperCAmelCase__ : Tuple = text[self.print_len :]
UpperCAmelCase__ : List[str] = []
UpperCAmelCase__ : List[Any] = 0
else:
UpperCAmelCase__ : List[Any] = ""
UpperCAmelCase__ : Optional[Any] = True
self.on_finalized_text(__UpperCamelCase , stream_end=__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = False )-> Union[str, Any]:
print(__UpperCamelCase , flush=__UpperCamelCase , end="" if not stream_end else None )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if (
(cp >= 0X4e00 and cp <= 0X9fff)
or (cp >= 0X3400 and cp <= 0X4dbf) #
or (cp >= 0X2_0000 and cp <= 0X2_a6df) #
or (cp >= 0X2_a700 and cp <= 0X2_b73f) #
or (cp >= 0X2_b740 and cp <= 0X2_b81f) #
or (cp >= 0X2_b820 and cp <= 0X2_ceaf) #
or (cp >= 0Xf900 and cp <= 0Xfaff)
or (cp >= 0X2_f800 and cp <= 0X2_fa1f) #
): #
return True
return False
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase = False , __UpperCamelCase = None , **__UpperCamelCase )-> List[str]:
super().__init__(__UpperCamelCase , __UpperCamelCase , **__UpperCamelCase )
UpperCAmelCase__ : List[str] = Queue()
UpperCAmelCase__ : Dict = None
UpperCAmelCase__ : Tuple = timeout
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = False )-> Any:
self.text_queue.put(__UpperCamelCase , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self )-> Dict:
return self
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ : List[str] = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
| 708 |
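# Pure-Python sketch of the flush heuristic in the streamer above: emit only
# complete words, keeping a possibly unfinished final word buffered until the
# next token arrives (a trailing newline flushes everything).
def printable_prefix(decoded: str, already_printed: int) -> str:
    if decoded.endswith("\n"):
        return decoded[already_printed:]
    return decoded[already_printed : decoded.rfind(" ") + 1]

print(repr(printable_prefix("Hello wor", 0)))      # 'Hello ' -- "wor" may still grow
print(repr(printable_prefix("Hello world\n", 6)))  # 'world\n' -- newline flushes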
"""simple docstring"""
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
    if not isinstance(lowerCAmelCase , int ):
        raise TypeError("Input value must be a 'int' type" )
    if lowerCAmelCase < 0:
        raise ValueError("Input value must be a positive integer" )
    return bin(lowerCAmelCase ).count("1" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 | 0 |
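# Equivalent constant-space variant of the popcount above (Brian Kernighan's
# trick): each `n &= n - 1` clears the lowest set bit, so the loop runs once
# per set bit.
def popcount(n: int) -> int:
    count = 0
    while n:
        n &= n - 1
        count += 1
    return count

assert popcount(0b1011) == 3 == bin(0b1011).count("1")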
"""simple docstring"""
from typing import Any
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase )-> Optional[int]:
UpperCAmelCase__ : List[str] = data
UpperCAmelCase__ : Optional[Any] = None
class _lowercase :
'''simple docstring'''
def __init__( self )-> Any:
UpperCAmelCase__ : Union[str, Any] = None
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : int = self.head
while temp is not None:
print(temp.data , end=" " )
UpperCAmelCase__ : Any = temp.next
print()
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Tuple:
UpperCAmelCase__ : List[str] = Node(_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : List[str] = self.head
UpperCAmelCase__ : int = new_node
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> Tuple:
if node_data_a == node_data_a:
return
else:
UpperCAmelCase__ : List[Any] = self.head
while node_a is not None and node_a.data != node_data_a:
UpperCAmelCase__ : Optional[Any] = node_a.next
UpperCAmelCase__ : List[Any] = self.head
while node_a is not None and node_a.data != node_data_a:
UpperCAmelCase__ : List[Any] = node_a.next
if node_a is None or node_a is None:
return
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = node_a.data, node_a.data
if __name__ == "__main__":
A__ : Optional[int] = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("""After swapping""")
ll.print_list()
| 709 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
A__ : Optional[Any] = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : bool , lowerCAmelCase : bool ):
'''simple docstring'''
def run_func(lowerCAmelCase : Dict ):
@wraps(lowerCAmelCase )
def run_in_eager_mode(*lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Dict ):
return func(*lowerCAmelCase , **lowerCAmelCase )
@wraps(lowerCAmelCase )
@tf.function(experimental_compile=lowerCAmelCase )
def run_in_graph_mode(*lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Optional[Any] ):
return func(*lowerCAmelCase , **lowerCAmelCase )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def a__ ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int ):
'''simple docstring'''
UpperCAmelCase__ : Dict = random.Random()
UpperCAmelCase__ : List[str] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(lowerCAmelCase , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
_A = 42
_A = "TensorFlow"
@property
def lowerCAmelCase__ ( self )-> Optional[int]:
return tf.__version__
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> float:
# initialize GPU on separate process
UpperCAmelCase__ : Any = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Union[str, Any] = self._prepare_inference_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_speed(_inference )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> float:
UpperCAmelCase__ : List[Any] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : List[Any] = self._prepare_train_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_speed(_train )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> [Memory, Optional[MemorySummary]]:
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCamelCase )
UpperCAmelCase__ : List[str] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Any = self._prepare_inference_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_memory(_inference )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> [Memory, Optional[MemorySummary]]:
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCamelCase )
UpperCAmelCase__ : Any = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Optional[Any] = self._prepare_train_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_memory(_train )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Callable[[], None]:
UpperCAmelCase__ : Union[str, Any] = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCAmelCase__ : Optional[int] = (
hasattr(__UpperCamelCase , "architectures" )
and isinstance(config.architectures , __UpperCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase__ : str = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase__ : Any = __import__("transformers" , fromlist=[model_class] )
UpperCAmelCase__ : List[Any] = getattr(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Dict = model_cls(__UpperCamelCase )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCAmelCase__ : Any = TF_MODEL_MAPPING[config.__class__](__UpperCamelCase )
# encoder-decoder has vocab size saved differently
UpperCAmelCase__ : int = config.vocab_size if hasattr(__UpperCamelCase , "vocab_size" ) else config.encoder.vocab_size
UpperCAmelCase__ : Optional[Any] = random_input_ids(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(__UpperCamelCase , decoder_input_ids=__UpperCamelCase , training=__UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(__UpperCamelCase , training=__UpperCamelCase )
UpperCAmelCase__ : Dict = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Callable[[], None]:
UpperCAmelCase__ : List[Any] = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCAmelCase__ : Any = (
hasattr(__UpperCamelCase , "architectures" )
and isinstance(config.architectures , __UpperCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase__ : Any = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase__ : int = __import__("transformers" , fromlist=[model_class] )
UpperCAmelCase__ : int = getattr(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = model_cls(__UpperCamelCase )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCAmelCase__ : List[str] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__UpperCamelCase )
# encoder-decoder has vocab size saved differently
UpperCAmelCase__ : Union[str, Any] = config.vocab_size if hasattr(__UpperCamelCase , "vocab_size" ) else config.encoder.vocab_size
UpperCAmelCase__ : Dict = random_input_ids(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
UpperCAmelCase__ : Union[str, Any] = model(__UpperCamelCase , decoder_input_ids=__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase )[0]
UpperCAmelCase__ : Union[str, Any] = tf.gradients(__UpperCamelCase , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
UpperCAmelCase__ : Union[str, Any] = model(__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase )[0]
UpperCAmelCase__ : Any = tf.gradients(__UpperCamelCase , model.trainable_variables )
return gradients
UpperCAmelCase__ : str = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def lowerCAmelCase__ ( self , __UpperCamelCase )-> float:
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run additional 5 times to stabilize compilation for tpu
logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
timeit.repeat(__UpperCamelCase , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
UpperCAmelCase__ : Optional[Any] = timeit.repeat(
__UpperCamelCase , repeat=self.args.repeat , number=10 , )
return min(__UpperCamelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> [Memory, MemorySummary]:
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
UpperCAmelCase__ : List[str] = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
                    if not is_py3nvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
UpperCAmelCase__ : Optional[int] = "N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
UpperCAmelCase__ : Any = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
UpperCAmelCase__ : Optional[int] = nvml.nvmlDeviceGetMemoryInfo(__UpperCamelCase )
UpperCAmelCase__ : str = meminfo.used
UpperCAmelCase__ : int = Memory(__UpperCamelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
UpperCAmelCase__ : Any = None
else:
UpperCAmelCase__ : List[Any] = measure_peak_memory_cpu(__UpperCamelCase )
                        UpperCAmelCase__ : Optional[Any] = Memory(memory_bytes ) if isinstance(memory_bytes , int ) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    UpperCAmelCase__ : Optional[Any] = stop_memory_tracing(__UpperCamelCase )
                    if memory is None:
                        UpperCAmelCase__ : Tuple = summary.total
                else:
                    UpperCAmelCase__ : int = None
                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(F"Doesn't fit on GPU. {e}" )
                return "N/A", None
| 660 | 0 |
"""simple docstring"""
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def a__ ( *lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any] = None , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : Tuple=2 ):
'''simple docstring'''
from .. import __version__
UpperCAmelCase__ : int = take_from
UpperCAmelCase__ : Tuple = ()
    if not isinstance(args[0] , tuple ):
UpperCAmelCase__ : Any = (args,)
for attribute, version_name, message in args:
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
raise ValueError(
F"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\'"
F" version {__version__} is >= {version_name}" )
UpperCAmelCase__ : List[Any] = None
        if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            UpperCAmelCase__ : Optional[Any] = F"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs , attribute ):
            values += (getattr(deprecated_kwargs , attribute ),)
UpperCAmelCase__ : Dict = F"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
elif deprecated_kwargs is None:
UpperCAmelCase__ : str = F"`{attribute}` is deprecated and will be removed in version {version_name}."
if warning is not None:
UpperCAmelCase__ : Optional[Any] = warning + " " if standard_warn else ""
            warnings.warn(warning + message , FutureWarning , stacklevel=A_ )
    if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0:
UpperCAmelCase__ : Union[str, Any] = inspect.getouterframes(inspect.currentframe() )[1]
UpperCAmelCase__ : Union[str, Any] = call_frame.filename
UpperCAmelCase__ : Any = call_frame.lineno
UpperCAmelCase__ : str = call_frame.function
UpperCAmelCase__ , UpperCAmelCase__ : Any = next(iter(deprecated_kwargs.items() ) )
raise TypeError(F"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`" )
    if len(values ) == 0:
        return
    elif len(values ) == 1:
return values[0]
return values
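# Self-contained sketch (not part of the original module) of the warning pattern used by
# the helper above: build a message per deprecated attribute and emit a FutureWarning.
import warnings as _warnings


def _warn_deprecated_sketch(attribute, version_name, message, stacklevel=2):
    warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
    _warnings.warn(warning + " " + message, FutureWarning, stacklevel=stacklevel)


# Example: _warn_deprecated_sketch("old_arg", "0.99.0", "Use `new_arg` instead.")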
| 710 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
A__ : List[str] = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
A__ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 660 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=99 , __UpperCamelCase=32 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=5_12 , __UpperCamelCase=16 , __UpperCamelCase=2 , __UpperCamelCase=0.02 , __UpperCamelCase=4 , )-> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = parent
UpperCAmelCase__ : List[Any] = batch_size
UpperCAmelCase__ : Tuple = seq_length
UpperCAmelCase__ : Optional[Any] = is_training
UpperCAmelCase__ : str = use_attention_mask
UpperCAmelCase__ : Union[str, Any] = use_token_type_ids
UpperCAmelCase__ : Union[str, Any] = use_labels
UpperCAmelCase__ : List[str] = vocab_size
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : List[Any] = num_hidden_layers
UpperCAmelCase__ : Any = num_attention_heads
UpperCAmelCase__ : str = intermediate_size
UpperCAmelCase__ : str = hidden_act
UpperCAmelCase__ : Dict = hidden_dropout_prob
UpperCAmelCase__ : int = attention_probs_dropout_prob
UpperCAmelCase__ : int = max_position_embeddings
UpperCAmelCase__ : Tuple = type_vocab_size
UpperCAmelCase__ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase__ : Tuple = initializer_range
UpperCAmelCase__ : str = num_choices
def lowerCAmelCase__ ( self )-> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : Optional[Any] = None
if self.use_attention_mask:
UpperCAmelCase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ : Union[str, Any] = None
if self.use_token_type_ids:
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ : Optional[Any] = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase__ ( self )-> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.prepare_config_and_inputs()
UpperCAmelCase__ : str = config_and_inputs
UpperCAmelCase__ : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self )-> Dict:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
UpperCAmelCase__ : Optional[Any] = config_and_inputs
UpperCAmelCase__ : Dict = True
UpperCAmelCase__ : Tuple = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class _lowercase ( snake_case__ , unittest.TestCase ):
'''simple docstring'''
_A = True
_A = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase__ ( self )-> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = FlaxBertModelTester(self )
@slow
def lowerCAmelCase__ ( self )-> str:
'''simple docstring'''
UpperCAmelCase__ : List[str] = FlaxBertModel.from_pretrained("bert-base-cased" )
UpperCAmelCase__ : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(_A )
| 711 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
@register_to_config
def __init__( self , __UpperCamelCase = 7_68 , )-> Union[str, Any]:
super().__init__()
UpperCAmelCase__ : str = nn.Parameter(torch.zeros(1 , __UpperCamelCase ) )
UpperCAmelCase__ : Optional[int] = nn.Parameter(torch.ones(1 , __UpperCamelCase ) )
def lowerCAmelCase__ ( self , __UpperCamelCase = None , __UpperCamelCase = None , )-> Any:
UpperCAmelCase__ : Dict = nn.Parameter(self.mean.to(__UpperCamelCase ).to(__UpperCamelCase ) )
UpperCAmelCase__ : Any = nn.Parameter(self.std.to(__UpperCamelCase ).to(__UpperCamelCase ) )
return self
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : Dict = (embeds - self.mean) * 1.0 / self.std
return embeds
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[Any]:
UpperCAmelCase__ : Any = (embeds * self.std) + self.mean
return embeds
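# Self-contained round-trip check (not part of the original module): scaling then
# unscaling with the same mean/std, as the normalizer above does, is lossless.
import torch as _torch

_mean, _std = _torch.zeros(1, 4), _torch.full((1, 4), 2.0)
_embeds = _torch.randn(2, 4)
_scaled = (_embeds - _mean) * 1.0 / _std
assert _torch.allclose((_scaled * _std) + _mean, _embeds)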
| 660 | 0 |
"""simple docstring"""
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
A__ : List[str] = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""")
@require_sentencepiece
@require_tokenizers
class _lowercase ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
    _A = GPTSw3Tokenizer
_A = False
_A = True
_A = False
def lowerCAmelCase__ ( self )-> List[str]:
super().setUp()
# We have a SentencePiece fixture for testing
        UpperCAmelCase__ : Any = GPTSw3Tokenizer(__a , eos_token="<unk>" , bos_token="<unk>" , pad_token="<unk>" )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : int = "This is a test"
UpperCAmelCase__ : Union[str, Any] = "This is a test"
return input_text, output_text
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : Any = "<s>"
UpperCAmelCase__ : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(__a ) , 20_00 )
def lowerCAmelCase__ ( self )-> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 20_00 )
def lowerCAmelCase__ ( self )-> Optional[Any]:
        UpperCAmelCase__ : str = GPTSw3Tokenizer(__a )
UpperCAmelCase__ : Union[str, Any] = tokenizer.tokenize("This is a test" )
self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [4_65, 2_87, 2_65, 6_31, 8_42] )
UpperCAmelCase__ : Tuple = tokenizer.tokenize("I was born in 92000, and this is falsé." )
# fmt: off
self.assertListEqual(
__a , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] , )
# fmt: on
UpperCAmelCase__ : Dict = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60] , )
UpperCAmelCase__ : Optional[Any] = tokenizer.convert_ids_to_tokens(__a )
# fmt: off
self.assertListEqual(
__a , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] )
# fmt: on
def lowerCAmelCase__ ( self )-> Dict:
        UpperCAmelCase__ : Tuple = GPTSw3Tokenizer(__a )
UpperCAmelCase__ : Dict = ["This is a test", "I was born in 92000, and this is falsé."]
UpperCAmelCase__ : Union[str, Any] = [
[4_65, 2_87, 2_65, 6_31, 8_42],
[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(__a , __a ):
self.assertListEqual(tokenizer.encode_fast(__a ) , __a )
# Test that decode_fast returns the input text
for text, token_ids in zip(__a , __a ):
self.assertEqual(tokenizer.decode_fast(__a ) , __a )
@slow
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ : Optional[int] = [
"<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')",
"Hey there, how are you doing this fine day?",
"This is a text with a trailing spaces followed by a dot .",
"Häj sväjs lillebrör! =)",
"Det är inget fel på Mr. Cool",
]
# fmt: off
UpperCAmelCase__ : str = {"input_ids": [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="AI-Sweden/gpt-sw3-126m" , sequences=__a , )
| 712 |
"""simple docstring"""
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : List[Any] ):
'''simple docstring'''
# Construct model
if gpta_config_file == "":
        UpperCAmelCase__ : Optional[int] = GPT2Config()
else:
        UpperCAmelCase__ : Dict = GPT2Config.from_json_file(lowerCAmelCase )
    UpperCAmelCase__ : Optional[Any] = GPT2Model(lowerCAmelCase )
# Load weights from numpy
    load_tf_weights_in_gpt2(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Save pytorch-model
UpperCAmelCase__ : Optional[int] = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
UpperCAmelCase__ : Any = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(F"Save PyTorch model to {pytorch_weights_dump_path}" )
torch.save(model.state_dict() , lowerCAmelCase )
print(F"Save configuration file to {pytorch_config_dump_path}" )
with open(lowerCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
A__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
A__ : Optional[Any] = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
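# Hedged usage sketch (the script filename and paths are placeholders, not from the
# original file):
#
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /path/to/tf_checkpoint \
#       --pytorch_dump_folder_path /path/to/output \
#       --gpt2_config_file /path/to/config.json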
| 660 | 0 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A__ : str = logging.get_logger(__name__)
A__ : List[Any] = {"""vocab_file""": """vocab.json"""}
A__ : int = {
"""vocab_file""": {
"""mgp-str""": """https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json""",
}
}
A__ : List[Any] = {"""mgp-str""": 27}
class _lowercase ( lowercase_ ):
'''simple docstring'''
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __UpperCamelCase , __UpperCamelCase="[GO]" , __UpperCamelCase="[GO]" , __UpperCamelCase="[s]" , __UpperCamelCase="[GO]" , **__UpperCamelCase )-> Union[str, Any]:
super().__init__(
unk_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , **lowerCamelCase_ , )
with open(lowerCamelCase_ , encoding="utf-8" ) as vocab_handle:
UpperCAmelCase__ : Union[str, Any] = json.load(lowerCamelCase_ )
UpperCAmelCase__ : Dict = {v: k for k, v in self.vocab.items()}
@property
def lowerCAmelCase__ ( self )-> Tuple:
return len(self.vocab )
def lowerCAmelCase__ ( self )-> str:
return dict(self.vocab , **self.added_tokens_encoder )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[Any]:
UpperCAmelCase__ : int = []
for s in text:
            char_tokens.extend(s )
return char_tokens
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Any:
return self.vocab.get(lowerCamelCase_ , self.vocab.get(self.unk_token ) )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> str:
return self.decoder.get(lowerCamelCase_ )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None )-> Optional[Any]:
if not os.path.isdir(lowerCamelCase_ ):
logger.error("Vocabulary path ({}) should be a directory".format(lowerCamelCase_ ) )
return
UpperCAmelCase__ : List[str] = os.path.join(
lowerCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
with open(lowerCamelCase_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + "\n" )
return (vocab_file,)
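# Self-contained sketch (the vocabulary is made up): the tokenizer above is purely
# character-level, with unknown characters falling back to the "[GO]" token id.
_vocab = {"[GO]": 0, "[s]": 1, "a": 2, "b": 3, "c": 4}
_ids = [_vocab.get(ch, _vocab["[GO]"]) for ch in "abz"]
assert _ids == [2, 3, 0]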
| 713 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
A__ : Optional[int] = ["""small""", """medium""", """large"""]
A__ : Optional[int] = """lm_head.decoder.weight"""
A__ : Dict = """lm_head.weight"""
def a__ ( lowerCAmelCase : str , lowerCAmelCase : str ):
'''simple docstring'''
UpperCAmelCase__ : Dict = torch.load(lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = d.pop(lowerCAmelCase )
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
torch.save(lowerCAmelCase , os.path.join(lowerCAmelCase , lowerCAmelCase ) )
if __name__ == "__main__":
A__ : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
A__ : Tuple = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
A__ : Tuple = os.path.join(args.dialogpt_path, f"""{MODEL}_ft.pkl""")
A__ : str = f"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
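# Self-contained sketch (toy state dict): the conversion above is a single key rename
# inside the checkpoint dictionary.
_state = {"lm_head.decoder.weight": [1.0], "transformer.h.0.attn.bias": [0.0]}
_state["lm_head.weight"] = _state.pop("lm_head.decoder.weight")
assert "lm_head.weight" in _state and "lm_head.decoder.weight" not in _state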
| 660 | 0 |
"""simple docstring"""
def a__ ( lowerCAmelCase : bytes ):
'''simple docstring'''
return "".join([hex(A__ )[2:].zfill(2 ).upper() for byte in list(A__ )] )
def a__ ( lowerCAmelCase : str ):
'''simple docstring'''
if (len(A__ ) % 2) != 0:
raise ValueError(
"Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(A__ ) <= set("0123456789ABCDEF" ):
raise ValueError(
"Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(A__ ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
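# Self-contained round-trip sketch using the stdlib equivalents of the two helpers above:
import base64 as _base64

_payload = b"Hello World!"
_encoded = _base64.b16encode(_payload)  # uppercase hex, per RFC 3548 section 6
assert _base64.b16decode(_encoded) == _payload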
| 714 |
"""simple docstring"""
from math import isqrt
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
            for j in range(i**2 , lowerCAmelCase , i ):
UpperCAmelCase__ : List[Any] = False
return [i for i in range(2 , lowerCAmelCase ) if is_prime[i]]
def a__ ( lowerCAmelCase : int = 10**8 ):
'''simple docstring'''
UpperCAmelCase__ : Dict = calculate_prime_numbers(max_number // 2 )
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Tuple = len(lowerCAmelCase ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(f"""{solution() = }""")
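# Self-contained sanity sketch (not part of the original file): a tiny sieve in the same
# style as the prime generator above, checked against the primes below 30.
def _sieve_sketch(max_number):
    is_prime = [True] * max_number
    for i in range(2, int(max_number**0.5) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


assert _sieve_sketch(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]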
| 660 | 0 |
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class _lowercase :
'''simple docstring'''
@staticmethod
def lowerCAmelCase__ ( *__UpperCamelCase , **__UpperCamelCase )-> Tuple:
pass
def a__ ( lowerCAmelCase : str ):
'''simple docstring'''
    UpperCAmelCase__ : Tuple = hashlib.md5(image.tobytes() )
return m.hexdigest()[:10]
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = np.array(lowerCamelCase__ )
UpperCAmelCase__ : List[Any] = npimg.shape
return {"hash": hashimage(lowerCamelCase__ ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
_A = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
_A = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> int:
UpperCAmelCase__ : Dict = MaskGenerationPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> Any:
pass
@require_tf
@unittest.skip("Image segmentation not implemented in TF" )
def lowerCAmelCase__ ( self )-> Dict:
pass
@slow
@require_torch
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : Any = pipeline("mask-generation" , model="facebook/sam-vit-huge" )
UpperCAmelCase__ : Union[str, Any] = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg" , points_per_batch=2_56 )
# Shortening by hashing
UpperCAmelCase__ : str = []
for i, o in enumerate(outputs["masks"] ):
        new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
# fmt: off
        self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (4_80, 6_40)}, "scores": 1.0444},
{"mask": {"hash": "6affa964c6", "shape": (4_80, 6_40)}, "scores": 1.021},
{"mask": {"hash": "dfe28a0388", "shape": (4_80, 6_40)}, "scores": 1.0167},
{"mask": {"hash": "c0a5f4a318", "shape": (4_80, 6_40)}, "scores": 1.0132},
{"mask": {"hash": "fe8065c197", "shape": (4_80, 6_40)}, "scores": 1.0053},
{"mask": {"hash": "e2d0b7a0b7", "shape": (4_80, 6_40)}, "scores": 0.9967},
{"mask": {"hash": "453c7844bd", "shape": (4_80, 6_40)}, "scores": 0.993},
{"mask": {"hash": "3d44f2926d", "shape": (4_80, 6_40)}, "scores": 0.9909},
{"mask": {"hash": "64033ddc3f", "shape": (4_80, 6_40)}, "scores": 0.9879},
{"mask": {"hash": "801064ff79", "shape": (4_80, 6_40)}, "scores": 0.9834},
{"mask": {"hash": "6172f276ef", "shape": (4_80, 6_40)}, "scores": 0.9716},
{"mask": {"hash": "b49e60e084", "shape": (4_80, 6_40)}, "scores": 0.9612},
{"mask": {"hash": "a811e775fd", "shape": (4_80, 6_40)}, "scores": 0.9599},
{"mask": {"hash": "a6a8ebcf4b", "shape": (4_80, 6_40)}, "scores": 0.9552},
{"mask": {"hash": "9d8257e080", "shape": (4_80, 6_40)}, "scores": 0.9532},
{"mask": {"hash": "32de6454a8", "shape": (4_80, 6_40)}, "scores": 0.9516},
{"mask": {"hash": "af3d4af2c8", "shape": (4_80, 6_40)}, "scores": 0.9499},
{"mask": {"hash": "3c6db475fb", "shape": (4_80, 6_40)}, "scores": 0.9483},
{"mask": {"hash": "c290813fb9", "shape": (4_80, 6_40)}, "scores": 0.9464},
{"mask": {"hash": "b6f0b8f606", "shape": (4_80, 6_40)}, "scores": 0.943},
{"mask": {"hash": "92ce16bfdf", "shape": (4_80, 6_40)}, "scores": 0.943},
{"mask": {"hash": "c749b25868", "shape": (4_80, 6_40)}, "scores": 0.9408},
{"mask": {"hash": "efb6cab859", "shape": (4_80, 6_40)}, "scores": 0.9335},
{"mask": {"hash": "1ff2eafb30", "shape": (4_80, 6_40)}, "scores": 0.9326},
{"mask": {"hash": "788b798e24", "shape": (4_80, 6_40)}, "scores": 0.9262},
{"mask": {"hash": "abea804f0e", "shape": (4_80, 6_40)}, "scores": 0.8999},
{"mask": {"hash": "7b9e8ddb73", "shape": (4_80, 6_40)}, "scores": 0.8986},
{"mask": {"hash": "cd24047c8a", "shape": (4_80, 6_40)}, "scores": 0.8984},
{"mask": {"hash": "6943e6bcbd", "shape": (4_80, 6_40)}, "scores": 0.8873},
{"mask": {"hash": "b5f47c9191", "shape": (4_80, 6_40)}, "scores": 0.8871}
] , )
# fmt: on
@require_torch
@slow
def lowerCAmelCase__ ( self )-> int:
UpperCAmelCase__ : int = "facebook/sam-vit-huge"
UpperCAmelCase__ : Optional[Any] = pipeline("mask-generation" , model=__UpperCamelCase )
UpperCAmelCase__ : List[Any] = image_segmenter(
"http://images.cocodataset.org/val2017/000000039769.jpg" , pred_iou_thresh=1 , points_per_batch=2_56 )
# Shortening by hashing
UpperCAmelCase__ : int = []
for i, o in enumerate(outputs["masks"] ):
        new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (4_80, 6_40)}, "scores": 1.0444},
{"mask": {"hash": "6affa964c6", "shape": (4_80, 6_40)}, "scores": 1.0210},
{"mask": {"hash": "dfe28a0388", "shape": (4_80, 6_40)}, "scores": 1.0167},
{"mask": {"hash": "c0a5f4a318", "shape": (4_80, 6_40)}, "scores": 1.0132},
{"mask": {"hash": "fe8065c197", "shape": (4_80, 6_40)}, "scores": 1.0053},
] , )
| 715 |
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def a__ ( lowerCAmelCase : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : List[Any] ):
'''simple docstring'''
    if isinstance(lowerCAmelCase , tuple ):
UpperCAmelCase__ : Optional[int] = np.full((len(lowerCAmelCase ), sequence_length, 2) , lowerCAmelCase )
else:
UpperCAmelCase__ : Optional[Any] = np.full((len(lowerCAmelCase ), sequence_length) , lowerCAmelCase )
for i, tensor in enumerate(lowerCAmelCase ):
if padding_side == "right":
            if isinstance(lowerCAmelCase , tuple ):
UpperCAmelCase__ : Dict = tensor[:sequence_length]
else:
UpperCAmelCase__ : Tuple = tensor[:sequence_length]
else:
            if isinstance(lowerCAmelCase , tuple ):
UpperCAmelCase__ : Optional[Any] = tensor[:sequence_length]
else:
UpperCAmelCase__ : int = tensor[:sequence_length]
return out_tensor.tolist()
def a__ ( lowerCAmelCase : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = ord(lowerCAmelCase )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
UpperCAmelCase__ : Optional[Any] = unicodedata.category(lowerCAmelCase )
if cat.startswith("P" ):
return True
return False
@dataclass
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
_A = True
_A = None
_A = None
_A = -100
_A = "pt"
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[str]:
import torch
UpperCAmelCase__ : Optional[Any] = "label" if "label" in features[0].keys() else "labels"
UpperCAmelCase__ : Dict = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
UpperCAmelCase__ : str = self.tokenizer.pad(
__UpperCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , )
if labels is None:
return batch
UpperCAmelCase__ : Optional[Any] = torch.tensor(batch["entity_ids"] ).shape[1]
UpperCAmelCase__ : int = self.tokenizer.padding_side
if padding_side == "right":
UpperCAmelCase__ : int = [
                list(label ) + [self.label_pad_token_id] * (sequence_length - len(label )) for label in labels
]
else:
UpperCAmelCase__ : List[Any] = [
                [self.label_pad_token_id] * (sequence_length - len(label )) + list(label ) for label in labels
]
UpperCAmelCase__ : Optional[Any] = [feature["ner_tags"] for feature in features]
        UpperCAmelCase__ : int = padding_tensor(__UpperCamelCase , -1 , padding_side , sequence_length )
UpperCAmelCase__ : List[Any] = [feature["original_entity_spans"] for feature in features]
        UpperCAmelCase__ : int = padding_tensor(__UpperCamelCase , (-1, -1) , padding_side , sequence_length )
        UpperCAmelCase__ : Optional[int] = {k: torch.tensor(v , dtype=torch.int64 ) for k, v in batch.items()}
return batch
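# Self-contained sketch (toy sequences) of the right-side padding that `padding_tensor`
# implements for 1-D label sequences:
import numpy as _np

_seqs = [[1, 2, 3], [4]]
_padded = _np.full((len(_seqs), 4), -100)
for _i, _s in enumerate(_seqs):
    _s = _s[:4]
    _padded[_i, : len(_s)] = _s
assert _padded.tolist() == [[1, 2, 3, -100], [4, -100, -100, -100]]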
| 660 | 0 |
"""simple docstring"""
class _lowercase :
'''simple docstring'''
def __init__( self )-> Optional[Any]:
UpperCAmelCase__ : Union[str, Any] = {}
def lowerCAmelCase__ ( self )-> None:
print(self.vertex )
for i in self.vertex:
print(lowercase_ , " -> " , " -> ".join([str(lowercase_ ) for j in self.vertex[i]] ) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> None:
if from_vertex in self.vertex:
self.vertex[from_vertex].append(lowercase_ )
else:
# else make a new vertex
UpperCAmelCase__ : Any = [to_vertex]
def lowerCAmelCase__ ( self )-> None:
UpperCAmelCase__ : List[Any] = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
                self.dfs_recursive(i , visited )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> None:
UpperCAmelCase__ : int = True
print(lowercase_ , end=" " )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
                self.dfs_recursive(i , visited )
if __name__ == "__main__":
A__ : Tuple = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 716 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def a__ ( lowerCAmelCase : List[str] ):
'''simple docstring'''
def wrapper(*lowerCAmelCase : Any , **lowerCAmelCase : Tuple ):
UpperCAmelCase__ : Optional[int] = timeit.default_timer()
UpperCAmelCase__ : int = func(*lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase__ : List[Any] = timeit.default_timer() - starttime
return delta
UpperCAmelCase__ : int = func.__name__
return wrapper
def a__ ( lowerCAmelCase : dict , lowerCAmelCase : Optional[int]=100 , lowerCAmelCase : List[str]=None ):
'''simple docstring'''
UpperCAmelCase__ : str = []
UpperCAmelCase__ : Optional[Any] = seq_shapes or {}
for i in range(lowerCAmelCase ):
UpperCAmelCase__ : int = {}
for col_id, (k, v) in enumerate(features.items() ):
            if isinstance(v , _ArrayXD ):
UpperCAmelCase__ : List[str] = np.random.rand(*v.shape ).astype(v.dtype )
            elif isinstance(v , datasets.Value ):
if v.dtype == "string":
UpperCAmelCase__ : Dict = "The small grey turtle was surprisingly fast when challenged."
else:
UpperCAmelCase__ : str = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
            elif isinstance(v , datasets.Sequence ):
                while isinstance(v , datasets.Sequence ):
UpperCAmelCase__ : List[str] = v.feature
UpperCAmelCase__ : Optional[int] = seq_shapes[k]
UpperCAmelCase__ : Optional[int] = np.random.rand(*lowerCAmelCase ).astype(v.dtype )
UpperCAmelCase__ : Union[str, Any] = data
dummy_data.append((i, example) )
return dummy_data
def a__ ( lowerCAmelCase : List[str] , lowerCAmelCase : Tuple , lowerCAmelCase : List[str]=100 , lowerCAmelCase : Optional[int]=None ):
'''simple docstring'''
UpperCAmelCase__ : int = generate_examples(lowerCAmelCase , num_examples=lowerCAmelCase , seq_shapes=lowerCAmelCase )
with ArrowWriter(features=lowerCAmelCase , path=lowerCAmelCase ) as writer:
for key, record in dummy_data:
UpperCAmelCase__ : List[Any] = features.encode_example(lowerCAmelCase )
writer.write(lowerCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
F"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
UpperCAmelCase__ : Optional[int] = datasets.Dataset.from_file(filename=lowerCAmelCase , info=datasets.DatasetInfo(features=lowerCAmelCase ) )
return dataset
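# Self-contained sketch (feature spec and filename are made up; relies on the imports at
# the top of this file) of the write/finalize flow the benchmark above depends on:
import os as _os
import tempfile as _tempfile

_features = datasets.Features({"text": datasets.Value("string")})
_path = _os.path.join(_tempfile.mkdtemp(), "dummy.arrow")
with ArrowWriter(features=_features, path=_path) as _writer:
    _writer.write(_features.encode_example({"text": "hello"}))
    _num_examples, _num_bytes = _writer.finalize()
assert _num_examples == 1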
| 660 | 0 |
"""simple docstring"""
def a__ ( lowerCAmelCase : list[int] ):
'''simple docstring'''
UpperCAmelCase__ : int = len(snake_case__ )
    for i in range(len(snake_case__ ) ):
        for j in range(i + 1 , len(snake_case__ ) ):
if numbers[j] < numbers[i]:
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
A__ : Tuple = input("""Enter numbers separated by a comma:\n""").strip()
A__ : Union[str, Any] = [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
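# Self-contained check (not part of the original file): the exchange sort above, with the
# loop bounds spelled out, agrees with the built-in sorted().
def _exchange_sort_sketch(nums):
    for i in range(len(nums)):
        for j in range(i + 1, len(nums)):
            if nums[j] < nums[i]:
                nums[i], nums[j] = nums[j], nums[i]
    return nums


assert _exchange_sort_sketch([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]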
| 717 |
"""simple docstring"""
from manim import *
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : str = Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase__ : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Union[str, Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : List[Any] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : int = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = VGroup(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("CPU" , font_size=24 )
UpperCAmelCase__ : Any = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(4 )]
UpperCAmelCase__ : List[str] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Union[str, Any] = Text("GPU" , font_size=24 )
UpperCAmelCase__ : Dict = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : List[str] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("Model" , font_size=24 )
UpperCAmelCase__ : Dict = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : List[str] = []
for i, rect in enumerate(__UpperCamelCase ):
rect.set_stroke(__UpperCamelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
UpperCAmelCase__ : int = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCamelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCamelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__UpperCamelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__UpperCamelCase , buff=0.0 )
self.add(__UpperCamelCase )
cpu_targs.append(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Any = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("Loaded Checkpoint" , font_size=24 )
UpperCAmelCase__ : Any = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , aligned_edge=__UpperCamelCase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
UpperCAmelCase__ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase__ : Any = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : str = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(__UpperCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
UpperCAmelCase__ : Optional[Any] = MarkupText(
F"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCamelCase ) , Write(__UpperCamelCase ) )
self.play(Write(__UpperCamelCase , run_time=1 ) , Create(__UpperCamelCase , run_time=1 ) )
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : List[str] = []
for i, rect in enumerate(__UpperCamelCase ):
UpperCAmelCase__ : Optional[Any] = fill.copy().set_fill(__UpperCamelCase , opacity=0.7 )
target.move_to(__UpperCamelCase )
first_animations.append(GrowFromCenter(__UpperCamelCase , run_time=1 ) )
UpperCAmelCase__ : List[str] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__UpperCamelCase , run_time=1.5 ) )
self.play(*__UpperCamelCase )
self.play(*__UpperCamelCase )
self.wait()
| 660 | 0 |
"""simple docstring"""
from __future__ import annotations
def a__ ( lowerCAmelCase : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = str(lowerCAmelCase )
return n == n[::-1]
def a__ ( lowerCAmelCase : Dict = 100_0000 ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = 0
for i in range(1 , lowerCAmelCase ):
        if is_palindrome(i ) and is_palindrome(bin(i ).split("b" )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
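# Worked check (self-contained): 585 is a palindrome in base 10 and in base 2
# (585 == 0b1001001001), so the solution above counts it.
_n = 585
assert str(_n) == str(_n)[::-1]
assert bin(_n)[2:] == bin(_n)[2:][::-1]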
| 718 |
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
A__ : Tuple = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : nn.ModuleList , lowerCAmelCase : nn.ModuleList , lowerCAmelCase : List[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
    assert len(dest_layers ) == len(layers_to_copy ), F"{len(dest_layers )} != {len(layers_to_copy )}"
dest_layers.load_state_dict(layers_to_copy.state_dict() )
A__ : List[Any] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
A__ : List[Any] = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def a__ ( lowerCAmelCase : Dict , lowerCAmelCase : Dict ):
'''simple docstring'''
try:
UpperCAmelCase__ : Tuple = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
F" {n_student}" )
        return list(range(n_student ) )
def a__ ( lowerCAmelCase : int , lowerCAmelCase : Tuple ):
'''simple docstring'''
if n_student > n_teacher:
raise ValueError(F"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}" )
elif n_teacher == n_student:
        return list(range(n_teacher ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def a__ ( lowerCAmelCase : Union[str, PreTrainedModel] , lowerCAmelCase : Union[str, Path] = "student" , lowerCAmelCase : Union[int, None] = None , lowerCAmelCase : Union[int, None] = None , lowerCAmelCase : List[str]=False , lowerCAmelCase : List[str]=None , lowerCAmelCase : List[str]=None , **lowerCAmelCase : List[str] , ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
assert (e is not None) or (d is not None), _msg
    if isinstance(lowerCAmelCase , str ):
AutoTokenizer.from_pretrained(lowerCAmelCase ).save_pretrained(lowerCAmelCase ) # purely for convenience
        UpperCAmelCase__ : List[str] = AutoModelForSeq2SeqLM.from_pretrained(lowerCAmelCase ).eval()
else:
        assert isinstance(lowerCAmelCase , PreTrainedModel ), F"teacher must be a model or string got type {type(lowerCAmelCase )}"
UpperCAmelCase__ : int = teacher.config.to_diff_dict()
try:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
UpperCAmelCase__ : Tuple = teacher_e
if d is None:
UpperCAmelCase__ : str = teacher_d
init_kwargs.update({"encoder_layers": e, "decoder_layers": d} )
except AttributeError: # T5
if hasattr(teacher.config , "num_encoder_layers" ):
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
UpperCAmelCase__ : Optional[Any] = teacher_e
if d is None:
UpperCAmelCase__ : Optional[Any] = teacher_d
if hasattr(teacher.config , "num_encoder_layers" ):
init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d} )
else:
init_kwargs.update({"num_layers": e, "num_decoder_layers": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(lowerCAmelCase )
# Copy weights
UpperCAmelCase__ : Tuple = teacher.config_class(**lowerCAmelCase )
    UpperCAmelCase__ : List[str] = AutoModelForSeq2SeqLM.from_config(lowerCAmelCase )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
UpperCAmelCase__ : Optional[int] = student.load_state_dict(teacher.state_dict() , strict=lowerCAmelCase )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
        UpperCAmelCase__ , UpperCAmelCase__ : int = list(range(e ) ), list(range(d ) )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
F" {save_path}" )
student.save_pretrained(lowerCAmelCase )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
        UpperCAmelCase__ : List[int] = pick_layers_to_copy(e , teacher_e )
if d_layers_to_copy is None:
        UpperCAmelCase__ : List[int] = pick_layers_to_copy(d , teacher_d )
try:
if hasattr(
lowerCAmelCase , "prophetnet" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , e_layers_to_copy )
            copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , d_layers_to_copy )
else:
            copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , e_layers_to_copy )
            copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , d_layers_to_copy )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block , student.encoder.block , e_layers_to_copy )
        copy_layers(teacher.decoder.block , student.decoder.block , d_layers_to_copy )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}" )
UpperCAmelCase__ : int = {
"teacher_type": teacher.config.model_type,
"copied_encoder_layers": e_layers_to_copy,
"copied_decoder_layers": d_layers_to_copy,
}
student.save_pretrained(lowerCAmelCase )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
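# Self-contained sketch (layer count and sizes are made up) of the layer-copy mechanic in
# `copy_layers` above: select teacher layers, then load their state dict into the student.
from torch import nn as _nn

_teacher_layers = _nn.ModuleList([_nn.Linear(4, 4) for _ in range(12)])
_picked = [0, 6, 11]  # matches LAYERS_TO_COPY[12][3]
_student_layers = _nn.ModuleList([_nn.Linear(4, 4) for _ in range(3)])
_student_layers.load_state_dict(_nn.ModuleList([_teacher_layers[i] for i in _picked]).state_dict())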
| 660 | 0 |
"""simple docstring"""
def a__ ( lowerCAmelCase : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def a__ ( lowerCAmelCase : Union[str, Any] = 5000 ):
'''simple docstring'''
UpperCAmelCase__ : str = [(i * (3 * i - 1)) // 2 for i in range(1 , lowerCAmelCase )]
    for i, pentagonal_i in enumerate(pentagonal_nums ):
        for j in range(i , len(pentagonal_nums ) ):
UpperCAmelCase__ : List[Any] = pentagonal_nums[j]
UpperCAmelCase__ : int = pentagonal_i + pentagonal_j
UpperCAmelCase__ : Optional[int] = pentagonal_j - pentagonal_i
if is_pentagonal(lowerCAmelCase ) and is_pentagonal(lowerCAmelCase ):
return b
return -1
if __name__ == "__main__":
print(f"""{solution() = }""")
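# Worked check (self-contained): P(4) = 22 and P(7) = 70 come from n * (3n - 1) / 2, and
# the pentagonality test above inverts that formula.
def _is_pentagonal_sketch(n):
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


assert _is_pentagonal_sketch(22) and _is_pentagonal_sketch(70)
assert not _is_pentagonal_sketch(23)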
| 719 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowercase ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self )-> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : Tuple = ort.SessionOptions()
UpperCAmelCase__ : List[str] = False
return options
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase__ : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase__ : str = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : int = "A red cat sitting on a park bench"
UpperCAmelCase__ : Tuple = np.random.RandomState(0 )
UpperCAmelCase__ : Any = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type="np" , )
UpperCAmelCase__ : Tuple = output.images
UpperCAmelCase__ : Dict = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase__ : Union[str, Any] = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase__ : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" )
UpperCAmelCase__ : Optional[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : int = "A red cat sitting on a park bench"
UpperCAmelCase__ : List[str] = np.random.RandomState(0 )
UpperCAmelCase__ : str = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCamelCase , output_type="np" , )
UpperCAmelCase__ : List[str] = output.images
UpperCAmelCase__ : List[Any] = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase__ : int = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 660 | 0 |
"""simple docstring"""
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class _lowercase ( __lowerCamelCase , __lowerCamelCase ):
_A = 'pixel_values'
_A = False
_A = TimmBackboneConfig
def __init__( self , __UpperCamelCase , **__UpperCamelCase )-> Any:
requires_backends(self , "timm" )
super().__init__(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase__ : Dict = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name." )
if config.backbone not in timm.list_models():
raise ValueError(F"backbone {config.backbone} is not supported by timm." )
if hasattr(SCREAMING_SNAKE_CASE_ , "out_features" ) and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead." )
UpperCAmelCase__ : Dict = getattr(SCREAMING_SNAKE_CASE_ , "use_pretrained_backbone" , SCREAMING_SNAKE_CASE_ )
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False." )
# We just take the final layer by default. This matches the default for the transformers models.
UpperCAmelCase__ : Dict = config.out_indices if getattr(SCREAMING_SNAKE_CASE_ , "out_indices" , SCREAMING_SNAKE_CASE_ ) is not None else (-1,)
UpperCAmelCase__ : str = timm.create_model(
config.backbone , pretrained=SCREAMING_SNAKE_CASE_ , features_only=config.features_only , in_chans=config.num_channels , out_indices=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
UpperCAmelCase__ : Union[str, Any] = self._backbone.return_layers
UpperCAmelCase__ : str = {layer["module"]: str(SCREAMING_SNAKE_CASE_ ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(SCREAMING_SNAKE_CASE_ )
@classmethod
def lowerCAmelCase__ ( cls , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )-> List[str]:
requires_backends(cls , ["vision", "timm"] )
from ...models.timm_backbone import TimmBackboneConfig
UpperCAmelCase__ : Union[str, Any] = kwargs.pop("config" , TimmBackboneConfig() )
UpperCAmelCase__ : int = kwargs.pop("use_timm_backbone" , SCREAMING_SNAKE_CASE_ )
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones" )
UpperCAmelCase__ : List[Any] = kwargs.pop("num_channels" , config.num_channels )
UpperCAmelCase__ : Any = kwargs.pop("features_only" , config.features_only )
UpperCAmelCase__ : str = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone )
UpperCAmelCase__ : Union[str, Any] = kwargs.pop("out_indices" , config.out_indices )
UpperCAmelCase__ : str = TimmBackboneConfig(
backbone=SCREAMING_SNAKE_CASE_ , num_channels=SCREAMING_SNAKE_CASE_ , features_only=SCREAMING_SNAKE_CASE_ , use_pretrained_backbone=SCREAMING_SNAKE_CASE_ , out_indices=SCREAMING_SNAKE_CASE_ , )
return super()._from_config(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> str:
pass
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , **__UpperCamelCase )-> str:
UpperCAmelCase__ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase__ : Union[str, Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCAmelCase__ : Optional[Any] = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
UpperCAmelCase__ : str = self._all_layers
UpperCAmelCase__ : List[str] = self._backbone(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
UpperCAmelCase__ : Optional[Any] = self._return_layers
UpperCAmelCase__ : List[str] = tuple(hidden_states[i] for i in self.out_indices )
else:
UpperCAmelCase__ : List[Any] = self._backbone(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
UpperCAmelCase__ : List[str] = None
UpperCAmelCase__ : str = tuple(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase__ : int = tuple(SCREAMING_SNAKE_CASE_ ) if hidden_states is not None else None
if not return_dict:
UpperCAmelCase__ : List[Any] = (feature_maps,)
if output_hidden_states:
UpperCAmelCase__ : List[str] = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=SCREAMING_SNAKE_CASE_ , hidden_states=SCREAMING_SNAKE_CASE_ , attentions=SCREAMING_SNAKE_CASE_ )
| 720 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
A__ : Optional[int] = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class TableTransformerConfig( PretrainedConfig ):
'''simple docstring'''
_A = 'table-transformer'
_A = ['past_key_values']
_A = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=3 , __UpperCamelCase=1_00 , __UpperCamelCase=6 , __UpperCamelCase=20_48 , __UpperCamelCase=8 , __UpperCamelCase=6 , __UpperCamelCase=20_48 , __UpperCamelCase=8 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=True , __UpperCamelCase="relu" , __UpperCamelCase=2_56 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.02 , __UpperCamelCase=1.0 , __UpperCamelCase=False , __UpperCamelCase="sine" , __UpperCamelCase="resnet50" , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=0.1 , **__UpperCamelCase , )-> List[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCAmelCase__ : Any = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : int = backbone_config.get("model_type" )
UpperCAmelCase__ : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase__ : int = config_class.from_dict(__UpperCamelCase )
# set timm attributes to None
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = None, None, None
UpperCAmelCase__ : Optional[int] = use_timm_backbone
UpperCAmelCase__ : Dict = backbone_config
UpperCAmelCase__ : List[Any] = num_channels
UpperCAmelCase__ : Any = num_queries
UpperCAmelCase__ : int = d_model
UpperCAmelCase__ : Optional[int] = encoder_ffn_dim
UpperCAmelCase__ : str = encoder_layers
UpperCAmelCase__ : Dict = encoder_attention_heads
UpperCAmelCase__ : Optional[Any] = decoder_ffn_dim
UpperCAmelCase__ : Tuple = decoder_layers
UpperCAmelCase__ : Optional[Any] = decoder_attention_heads
UpperCAmelCase__ : List[str] = dropout
UpperCAmelCase__ : Tuple = attention_dropout
UpperCAmelCase__ : List[Any] = activation_dropout
UpperCAmelCase__ : Dict = activation_function
UpperCAmelCase__ : Optional[Any] = init_std
UpperCAmelCase__ : List[str] = init_xavier_std
UpperCAmelCase__ : int = encoder_layerdrop
UpperCAmelCase__ : Tuple = decoder_layerdrop
UpperCAmelCase__ : int = encoder_layers
UpperCAmelCase__ : Dict = auxiliary_loss
UpperCAmelCase__ : Union[str, Any] = position_embedding_type
UpperCAmelCase__ : List[str] = backbone
UpperCAmelCase__ : List[Any] = use_pretrained_backbone
UpperCAmelCase__ : List[str] = dilation
# Hungarian matcher
UpperCAmelCase__ : Dict = class_cost
UpperCAmelCase__ : Any = bbox_cost
UpperCAmelCase__ : Tuple = giou_cost
# Loss coefficients
UpperCAmelCase__ : Any = mask_loss_coefficient
UpperCAmelCase__ : Dict = dice_loss_coefficient
UpperCAmelCase__ : Any = bbox_loss_coefficient
UpperCAmelCase__ : Tuple = giou_loss_coefficient
UpperCAmelCase__ : List[Any] = eos_coefficient
super().__init__(is_encoder_decoder=__UpperCamelCase , **__UpperCamelCase )
@property
def lowerCAmelCase__ ( self )-> int:
return self.encoder_attention_heads
@property
def lowerCAmelCase__ ( self )-> int:
return self.d_model
class TableTransformerOnnxConfig( OnnxConfig ):
'''simple docstring'''
_A = version.parse('1.11' )
@property
def lowerCAmelCase__ ( self )-> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def lowerCAmelCase__ ( self )-> float:
return 1E-5
@property
def lowerCAmelCase__ ( self )-> int:
return 12
| 660 | 0 |
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=99 , __UpperCamelCase=32 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=50 , __UpperCamelCase=0.02 , __UpperCamelCase=True , __UpperCamelCase=None , )-> Dict:
UpperCAmelCase__ : Optional[int] = parent
UpperCAmelCase__ : Any = batch_size
UpperCAmelCase__ : Union[str, Any] = seq_length
UpperCAmelCase__ : List[str] = is_training
UpperCAmelCase__ : Any = use_input_mask
UpperCAmelCase__ : Optional[Any] = vocab_size
UpperCAmelCase__ : str = hidden_size
UpperCAmelCase__ : Any = num_hidden_layers
UpperCAmelCase__ : Optional[int] = num_attention_heads
UpperCAmelCase__ : Tuple = intermediate_size
UpperCAmelCase__ : int = hidden_act
UpperCAmelCase__ : int = hidden_dropout_prob
UpperCAmelCase__ : Any = attention_probs_dropout_prob
UpperCAmelCase__ : Tuple = max_position_embeddings
UpperCAmelCase__ : List[str] = initializer_range
UpperCAmelCase__ : Tuple = use_labels
UpperCAmelCase__ : int = scope
def lowerCAmelCase__ ( self )-> Optional[int]:
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
config = self.get_config()
return config, input_ids, input_mask, token_labels
def lowerCAmelCase__ ( self )-> Optional[int]:
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=snake_case_ , initializer_range=self.initializer_range , )
def lowerCAmelCase__ ( self )-> List[Any]:
config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase , )-> List[str]:
UpperCAmelCase__ : Tuple = BertGenerationEncoder(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCAmelCase__ : int = model(snake_case_ , attention_mask=snake_case_ )
UpperCAmelCase__ : List[Any] = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase , )-> List[Any]:
UpperCAmelCase__ : Any = True
UpperCAmelCase__ : int = BertGenerationEncoder(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCAmelCase__ : int = model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , )
UpperCAmelCase__ : int = model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase , )-> Optional[Any]:
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : Optional[Any] = True
UpperCAmelCase__ : Optional[int] = BertGenerationDecoder(config=snake_case_ ).to(snake_case_ ).eval()
# first forward pass
UpperCAmelCase__ : Dict = model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , use_cache=snake_case_ , )
UpperCAmelCase__ : Dict = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase__ : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase__ : List[str] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCAmelCase__ : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase__ : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase__ : Optional[int] = model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , output_hidden_states=snake_case_ , )["hidden_states"][0]
UpperCAmelCase__ : Tuple = model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , past_key_values=snake_case_ , output_hidden_states=snake_case_ , )["hidden_states"][0]
# select random slice
UpperCAmelCase__ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase__ : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase__ : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case_ , snake_case_ , atol=1E-3 ) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , *__UpperCamelCase , )-> List[str]:
UpperCAmelCase__ : List[str] = BertGenerationDecoder(snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCAmelCase__ : Dict = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self )-> Optional[Any]:
config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class BertGenerationEncoderTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
_A = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
_A = (BertGenerationDecoder,) if is_torch_available() else ()
_A = (
{"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder}
if is_torch_available()
else {}
)
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : Optional[Any] = BertGenerationEncoderTester(self )
UpperCAmelCase__ : Optional[int] = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def lowerCAmelCase__ ( self )-> str:
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase__ : Optional[Any] = "bert"
self.model_tester.create_and_check_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*snake_case_ )
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*snake_case_ )
def lowerCAmelCase__ ( self )-> Dict:
# This regression test was failing with PyTorch < 1.3
config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask = self.model_tester.prepare_config_and_inputs_for_decoder()
input_mask = None
self.model_tester.create_and_check_model_as_decoder(
config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*snake_case_ )
@slow
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : int = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
self.assertIsNotNone(snake_case_ )
@require_torch
class BertGenerationEncoderIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : str = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
UpperCAmelCase__ : Optional[int] = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]] )
with torch.no_grad():
UpperCAmelCase__ : int = model(snake_case_ )[0]
UpperCAmelCase__ : str = torch.Size([1, 8, 10_24] )
self.assertEqual(output.shape , snake_case_ )
UpperCAmelCase__ : Optional[int] = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case_ , atol=1E-4 ) )
@require_torch
class BertGenerationDecoderIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : List[Any] = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
UpperCAmelCase__ : List[Any] = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]] )
with torch.no_grad():
UpperCAmelCase__ : List[str] = model(snake_case_ )[0]
UpperCAmelCase__ : str = torch.Size([1, 8, 5_03_58] )
self.assertEqual(output.shape , snake_case_ )
UpperCAmelCase__ : Optional[int] = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case_ , atol=1E-4 ) )
| 721 |
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
) -> Dict:
    '''simple docstring'''
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
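# Hedged usage sketch (paths and model name are placeholders, not from this
# script): a run on rank 0 would look roughly like
#   eval_data_dir("cnn_dm", "tmp_gen_tmp", "sshleifer/distilbart-xsum-12-3",
#                 bs=8, local_rank=0, dataset_kwargs={})
# and would write its shard of predictions to tmp_gen_tmp/rank_0_output.json.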
def run_generate():
    '''simple docstring'''
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name",
        type=str,
        help="like facebook/bart-large-cnn,t5-base, etc.",
        default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout",
        type=int,
        default=600,
        required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics: Dict = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
    else:
        shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> List:
    '''simple docstring'''
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
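# For example, combine_partial_results([[{"pred": "world", "id": 1}],
# [{"pred": "hello", "id": 0}]]) returns ["hello", "world"]: the per-rank
# records are flattened, sorted by id, and reduced to their predictions.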
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    '''simple docstring'''
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
    # Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 660 | 0 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
| 700 |
"""simple docstring"""
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    '''simple docstring'''
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        # number &= number - 1 clears the lowest set bit on each iteration
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    '''simple docstring'''
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
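# Quick equivalence check (illustrative addition, not part of the original
# module): both strategies must agree; e.g. 0b1011 has three set bits.
# >>> get_set_bits_count_using_brian_kernighans_algorithm(0b1011)
# 3
# >>> get_set_bits_count_using_modulo_operator(0b1011)
# 3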
def benchmark() -> None:
    '''simple docstring'''

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 660 | 0 |
"""simple docstring"""
def check_cycle(graph: dict) -> bool:
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph)
def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
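# Illustrative usage (the adjacency-list encoding {node: [neighbours]} is
# assumed): 0 -> 1 -> 2 -> 0 closes a cycle, so the first call returns True.
# >>> check_cycle({0: [1], 1: [2], 2: [0]})
# True
# >>> check_cycle({0: [1], 1: [2], 2: []})
# False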
if __name__ == "__main__":
from doctest import testmod
testmod()
| 701 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    '''simple docstring'''

    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 660 | 0 |
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    '''simple docstring'''
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b
def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    '''simple docstring'''
    return (gray > 127) & (gray <= 255)
def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    '''simple docstring'''
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1))
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
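# Minimal worked example (illustrative): dilating a single centre pixel with a
# 3x3 cross-shaped kernel grows it into a plus shape.
# >>> dilation(np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]]), np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]))
# array([[0, 1, 0],
#        [1, 1, 1],
#        [0, 1, 0]])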
if __name__ == "__main__":
# read original image
A__ : Union[str, Any] = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg'
A__ : Optional[Any] = np.array(Image.open(lena_path))
# kernel to be applied
A__ : int = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
A__ : Union[str, Any] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
A__ : List[str] = Image.fromarray(output).convert("""RGB""")
pil_img.save("""result_dilation.png""")
| 702 |
"""simple docstring"""
def manhattan_distance(point_a: list, point_b: list) -> float:
    '''simple docstring'''
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))
def _validate_point(point: list[float]) -> None:
    '''simple docstring'''
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")
def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    '''simple docstring'''
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
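# Worked example: in 2-D, |1 - 2| + |1 - 2| = 2, so both variants return 2.0.
# >>> manhattan_distance([1, 1], [2, 2])
# 2.0
# >>> manhattan_distance_one_liner([1, 1], [2, 2])
# 2.0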
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class AltCLIPConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "altclip"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
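    # Hedged usage sketch: assemble a composite config from the two sub-configs,
    # e.g. AltCLIPConfig.from_text_vision_configs(AltCLIPTextConfig(), AltCLIPVisionConfig()).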
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 703 |
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All prime numbers are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
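# Spot check for the 6k +/- 1 trial division above (illustrative):
# >>> [n for n in range(2, 20) if is_prime(n)]
# [2, 3, 5, 7, 11, 13, 17, 19]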
def solution(nth: int = 10001) -> int:
    '''simple docstring'''
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 660 | 0 |