Dataset schema (from the original viewer header):

| Column | Type | Values |
|---|---|---|
| code | string | lengths 87 – 55.2k |
| code_codestyle | int64 | 0 – 349 |
| style_context | string | lengths 135 – 49.1k |
| style_context_codestyle | int64 | 0 – 349 |
| label | int64 | 0 – 1 |
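The rows that follow are raw samples drawn from a dataset with the columns above. As a minimal sketch of how such a row can be inspected (the file name `train.parquet` and the use of pandas are assumptions, not part of the original dump):

```python
# Minimal sketch: inspect one row of a dataset with the schema above.
# "train.parquet" is a hypothetical file name; substitute the actual export.
import pandas as pd

df = pd.read_parquet("train.parquet")
row = df.iloc[0]
print(row["label"], row["code_codestyle"], row["style_context_codestyle"])
print(row["code"][:200])           # first 200 characters of the code sample
print(row["style_context"][:200])  # first 200 characters of the style context
```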
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    # Class, parameter, and attribute names are reconstructed from how they are
    # used in the methods below and in the test classes further down.
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            label_length=self.label_length,
            lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features,
            num_static_categorical_features=1,
            cardinality=[self.cardinality],
            embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict
    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        # The encoder saved and reloaded on its own must reproduce the full model's encoder output.
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        # create_network_inputs also returns scaling statistics that are not needed here
        # (the exact number of auxiliary return values is reconstructed).
        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_a = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        # Same standalone save/reload check for the decoder.
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_a = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}

    # The six boolean flags below were collapsed to a single placeholder name in the
    # source dump; the names used here are reconstructed and may not match the original order.
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass
    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
def UpperCAmelCase_ ( self: Tuple ):
'''simple docstring'''
UpperCamelCase__: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__: Optional[int] = True
UpperCamelCase__: List[Any] = getattr(self.model_tester , "seq_length" , SCREAMING_SNAKE_CASE__ )
UpperCamelCase__: Dict = getattr(self.model_tester , "decoder_seq_length" , SCREAMING_SNAKE_CASE__ )
UpperCamelCase__: Optional[Any] = getattr(self.model_tester , "encoder_seq_length" , SCREAMING_SNAKE_CASE__ )
UpperCamelCase__: Any = getattr(self.model_tester , "d_model" , SCREAMING_SNAKE_CASE__ )
UpperCamelCase__: Union[str, Any] = getattr(self.model_tester , "num_attention_heads" , SCREAMING_SNAKE_CASE__ )
UpperCamelCase__: str = d_model // num_attention_heads
for model_class in self.all_model_classes:
UpperCamelCase__: Dict = True
UpperCamelCase__: Optional[int] = False
UpperCamelCase__: Optional[Any] = True
UpperCamelCase__: Optional[int] = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
UpperCamelCase__: List[Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
UpperCamelCase__: str = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCamelCase__: Dict = True
UpperCamelCase__: List[str] = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
UpperCamelCase__: Optional[Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
UpperCamelCase__: Union[str, Any] = outputs.encoder_attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
UpperCamelCase__: Optional[Any] = len(SCREAMING_SNAKE_CASE__ )
UpperCamelCase__: str = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# decoder attentions
UpperCamelCase__: Union[str, Any] = outputs.decoder_attentions
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , (list, tuple) )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
UpperCamelCase__: List[str] = outputs.cross_attentions
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , (list, tuple) )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
UpperCamelCase__: Dict = True
UpperCamelCase__: Union[str, Any] = True
UpperCamelCase__: str = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
UpperCamelCase__: str = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(out_len + 2 , len(SCREAMING_SNAKE_CASE__ ) )
UpperCamelCase__: Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state

        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
"""simple docstring"""
# Function and parameter names are reconstructed; the originals were placeholder names.
def get_set_bits_count(number: int) -> int:
    """Count the number of set bits (1s) in the binary representation of a positive integer."""
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(number, float):
        raise TypeError("Input value must be a 'int' type")
    return bin(number).count("1")
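# Usage example (not part of the original file): 25 is 0b11001,
# so get_set_bits_count(25) returns 3.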
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
a : Any = logging.getLogger(__name__)
class a ( a__ ):
"""simple docstring"""
a : int = '''summarization'''
a : Tuple = ['''loss''']
a : List[Any] = ROUGE_KEYS
a : Union[str, Any] = '''rouge2'''
def __init__( self : Union[str, Any] , __lowercase : int , **__lowercase : str ) -> List[str]:
if hparams.sortish_sampler and hparams.gpus > 1:
__UpperCAmelCase : Union[str, Any] = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" )
if hparams.sortish_sampler:
raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" )
super().__init__(SCREAMING_SNAKE_CASE__ , num_labels=SCREAMING_SNAKE_CASE__ , mode=self.mode , **SCREAMING_SNAKE_CASE__ )
use_task_specific_params(self.model , """summarization""" )
save_git_info(self.hparams.output_dir )
__UpperCAmelCase : int = Path(self.output_dir ) / """metrics.json"""
__UpperCAmelCase : List[str] = Path(self.output_dir ) / """hparams.pkl"""
pickle_save(self.hparams , self.hparams_save_path )
__UpperCAmelCase : Tuple = 0
__UpperCAmelCase : Union[str, Any] = defaultdict(SCREAMING_SNAKE_CASE__ )
__UpperCAmelCase : Any = self.config.model_type
__UpperCAmelCase : int = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size
__UpperCAmelCase : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
__UpperCAmelCase : Tuple = {
"""train""": self.hparams.n_train,
"""val""": self.hparams.n_val,
"""test""": self.hparams.n_test,
}
__UpperCAmelCase : Union[str, Any] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
__UpperCAmelCase : int = {
"""train""": self.hparams.max_target_length,
"""val""": self.hparams.val_max_target_length,
"""test""": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f"""target_lens: {self.target_lens}"""
assert self.target_lens["train"] <= self.target_lens["test"], f"""target_lens: {self.target_lens}"""
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
__UpperCAmelCase : Optional[Any] = get_git_info()["""repo_sha"""]
__UpperCAmelCase : Optional[Any] = hparams.num_workers
__UpperCAmelCase : Tuple = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , SCREAMING_SNAKE_CASE__ ):
__UpperCAmelCase : Optional[int] = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
__UpperCAmelCase : Dict = self.decoder_start_token_id
__UpperCAmelCase : Tuple = (
SeqaSeqDataset if hasattr(self.tokenizer , """prepare_seq2seq_batch""" ) else LegacySeqaSeqDataset
)
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : Optional[int] = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
__UpperCAmelCase : List[str] = self.hparams.eval_max_gen_length
else:
__UpperCAmelCase : int = self.model.config.max_length
__UpperCAmelCase : Dict = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def UpperCAmelCase ( self : str , __lowercase : List[Any] ) -> Dict[str, List[str]]:
__UpperCAmelCase : Optional[Any] = {
k: self.tokenizer.batch_decode(v.tolist() ) if """mask""" not in k else v.shape for k, v in batch.items()
}
save_json(SCREAMING_SNAKE_CASE__ , Path(self.output_dir ) / """text_batch.json""" )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / """tok_batch.json""" )
__UpperCAmelCase : int = True
return readable_batch
def UpperCAmelCase ( self : List[Any] , __lowercase : int , **__lowercase : Union[str, Any] ) -> Optional[int]:
return self.model(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def UpperCAmelCase ( self : Union[str, Any] , __lowercase : List[str] ) -> List[Any]:
__UpperCAmelCase : Any = self.tokenizer.batch_decode(
SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
return lmap(str.strip , SCREAMING_SNAKE_CASE__ )
def UpperCAmelCase ( self : Any , __lowercase : str ) -> Tuple:
__UpperCAmelCase : Optional[int] = self.tokenizer.pad_token_id
__UpperCAmelCase : Union[str, Any] = batch["""input_ids"""], batch["""attention_mask"""]
__UpperCAmelCase : Optional[Any] = batch["""labels"""]
if isinstance(self.model , SCREAMING_SNAKE_CASE__ ):
__UpperCAmelCase : Union[str, Any] = self.model._shift_right(SCREAMING_SNAKE_CASE__ )
else:
__UpperCAmelCase : Union[str, Any] = shift_tokens_right(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
__UpperCAmelCase : Tuple = decoder_input_ids
self.save_readable_batch(SCREAMING_SNAKE_CASE__ )
__UpperCAmelCase : Dict = self(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ )
__UpperCAmelCase : Optional[Any] = outputs["""logits"""]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
__UpperCAmelCase : Any = nn.CrossEntropyLoss(ignore_index=SCREAMING_SNAKE_CASE__ )
assert lm_logits.shape[-1] == self.vocab_size
__UpperCAmelCase : int = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
__UpperCAmelCase : Optional[int] = nn.functional.log_softmax(SCREAMING_SNAKE_CASE__ , dim=-1 )
__UpperCAmelCase : int = label_smoothed_nll_loss(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.hparams.label_smoothing , ignore_index=SCREAMING_SNAKE_CASE__ )
return (loss,)
@property
def UpperCAmelCase ( self : List[Any] ) -> int:
return self.tokenizer.pad_token_id
def UpperCAmelCase ( self : Optional[int] , __lowercase : Tuple , __lowercase : Optional[int] ) -> Dict:
__UpperCAmelCase : List[Any] = self._step(SCREAMING_SNAKE_CASE__ )
__UpperCAmelCase : Dict = dict(zip(self.loss_names , SCREAMING_SNAKE_CASE__ ) )
# tokens per batch
__UpperCAmelCase : List[Any] = batch["""input_ids"""].ne(self.pad ).sum() + batch["""labels"""].ne(self.pad ).sum()
__UpperCAmelCase : str = batch["""input_ids"""].shape[0]
__UpperCAmelCase : List[str] = batch["""input_ids"""].eq(self.pad ).sum()
__UpperCAmelCase : List[Any] = batch["""input_ids"""].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def UpperCAmelCase ( self : Any , __lowercase : int , __lowercase : Optional[int] ) -> Dict:
return self._generative_step(SCREAMING_SNAKE_CASE__ )
def UpperCAmelCase ( self : str , __lowercase : Dict , __lowercase : Tuple="val" ) -> Dict:
self.step_count += 1
__UpperCAmelCase : Union[str, Any] = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
__UpperCAmelCase : Tuple = losses["""loss"""]
__UpperCAmelCase : List[str] = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""]
}
__UpperCAmelCase : Tuple = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
__UpperCAmelCase : torch.FloatTensor = torch.tensor(SCREAMING_SNAKE_CASE__ ).type_as(SCREAMING_SNAKE_CASE__ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(SCREAMING_SNAKE_CASE__ )
__UpperCAmelCase : str = {f"""{prefix}_avg_{k}""": x for k, x in losses.items()}
__UpperCAmelCase : Tuple = self.step_count
self.metrics[prefix].append(SCREAMING_SNAKE_CASE__ ) # callback writes this to self.metrics_save_path
__UpperCAmelCase : int = flatten_list([x["""preds"""] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f"""{prefix}_loss""": loss,
f"""{prefix}_{self.val_metric}""": metric_tensor,
}
def UpperCAmelCase ( self : Any , __lowercase : Tuple , __lowercase : Any ) -> Dict:
return calculate_rouge(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def UpperCAmelCase ( self : int , __lowercase : Tuple ) -> dict:
__UpperCAmelCase : Dict = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
__UpperCAmelCase : int = self.model.generate(
batch["""input_ids"""] , attention_mask=batch["""attention_mask"""] , use_cache=SCREAMING_SNAKE_CASE__ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
__UpperCAmelCase : Optional[Any] = (time.time() - ta) / batch["""input_ids"""].shape[0]
__UpperCAmelCase : List[str] = self.ids_to_clean_text(SCREAMING_SNAKE_CASE__ )
__UpperCAmelCase : List[str] = self.ids_to_clean_text(batch["""labels"""] )
__UpperCAmelCase : str = self._step(SCREAMING_SNAKE_CASE__ )
__UpperCAmelCase : Optional[Any] = dict(zip(self.loss_names , SCREAMING_SNAKE_CASE__ ) )
__UpperCAmelCase : Dict = self.calc_generative_metrics(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__UpperCAmelCase : int = np.mean(lmap(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
base_metrics.update(gen_time=SCREAMING_SNAKE_CASE__ , gen_len=SCREAMING_SNAKE_CASE__ , preds=SCREAMING_SNAKE_CASE__ , target=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
return base_metrics
def UpperCAmelCase ( self : Union[str, Any] , __lowercase : int , __lowercase : List[str] ) -> List[Any]:
return self._generative_step(SCREAMING_SNAKE_CASE__ )
def UpperCAmelCase ( self : List[str] , __lowercase : Dict ) -> Any:
return self.validation_epoch_end(SCREAMING_SNAKE_CASE__ , prefix="""test""" )
def UpperCAmelCase ( self : Union[str, Any] , __lowercase : Union[str, Any] ) -> SeqaSeqDataset:
__UpperCAmelCase : Optional[Any] = self.n_obs[type_path]
__UpperCAmelCase : Tuple = self.target_lens[type_path]
__UpperCAmelCase : Optional[Any] = self.dataset_class(
self.tokenizer , type_path=SCREAMING_SNAKE_CASE__ , n_obs=SCREAMING_SNAKE_CASE__ , max_target_length=SCREAMING_SNAKE_CASE__ , **self.dataset_kwargs , )
return dataset
def UpperCAmelCase ( self : Any , __lowercase : Optional[int] , __lowercase : List[Any] , __lowercase : List[str] = False ) -> DataLoader:
__UpperCAmelCase : List[str] = self.get_dataset(SCREAMING_SNAKE_CASE__ )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
__UpperCAmelCase : Tuple = dataset.make_sortish_sampler(SCREAMING_SNAKE_CASE__ , distributed=self.hparams.gpus > 1 )
return DataLoader(
SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , collate_fn=dataset.collate_fn , shuffle=SCREAMING_SNAKE_CASE__ , num_workers=self.num_workers , sampler=SCREAMING_SNAKE_CASE__ , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
__UpperCAmelCase : List[Any] = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
SCREAMING_SNAKE_CASE__ , batch_sampler=SCREAMING_SNAKE_CASE__ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , collate_fn=dataset.collate_fn , shuffle=SCREAMING_SNAKE_CASE__ , num_workers=self.num_workers , sampler=SCREAMING_SNAKE_CASE__ , )
def UpperCAmelCase ( self : str ) -> DataLoader:
__UpperCAmelCase : List[str] = self.get_dataloader("""train""" , batch_size=self.hparams.train_batch_size , shuffle=SCREAMING_SNAKE_CASE__ )
return dataloader
def UpperCAmelCase ( self : Optional[Any] ) -> DataLoader:
return self.get_dataloader("""val""" , batch_size=self.hparams.eval_batch_size )
def UpperCAmelCase ( self : Tuple ) -> DataLoader:
return self.get_dataloader("""test""" , batch_size=self.hparams.eval_batch_size )
@staticmethod
def UpperCAmelCase ( __lowercase : Tuple , __lowercase : Tuple ) -> Dict:
BaseTransformer.add_model_specific_args(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
add_generic_args(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
parser.add_argument(
"""--max_source_length""" , default=1024 , type=SCREAMING_SNAKE_CASE__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--max_target_length""" , default=56 , type=SCREAMING_SNAKE_CASE__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--val_max_target_length""" , default=142 , type=SCREAMING_SNAKE_CASE__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--test_max_target_length""" , default=142 , type=SCREAMING_SNAKE_CASE__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument("""--freeze_encoder""" , action="""store_true""" )
parser.add_argument("""--freeze_embeds""" , action="""store_true""" )
parser.add_argument("""--sortish_sampler""" , action="""store_true""" , default=SCREAMING_SNAKE_CASE__ )
parser.add_argument("""--overwrite_output_dir""" , action="""store_true""" , default=SCREAMING_SNAKE_CASE__ )
parser.add_argument("""--max_tokens_per_batch""" , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ )
parser.add_argument("""--logger_name""" , type=SCREAMING_SNAKE_CASE__ , choices=["""default""", """wandb""", """wandb_shared"""] , default="""default""" )
parser.add_argument("""--n_train""" , type=SCREAMING_SNAKE_CASE__ , default=-1 , required=SCREAMING_SNAKE_CASE__ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_val""" , type=SCREAMING_SNAKE_CASE__ , default=500 , required=SCREAMING_SNAKE_CASE__ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_test""" , type=SCREAMING_SNAKE_CASE__ , default=-1 , required=SCREAMING_SNAKE_CASE__ , help="""# examples. -1 means use all.""" )
parser.add_argument(
"""--task""" , type=SCREAMING_SNAKE_CASE__ , default="""summarization""" , required=SCREAMING_SNAKE_CASE__ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--label_smoothing""" , type=SCREAMING_SNAKE_CASE__ , default=0.0 , required=SCREAMING_SNAKE_CASE__ )
parser.add_argument("""--src_lang""" , type=SCREAMING_SNAKE_CASE__ , default="""""" , required=SCREAMING_SNAKE_CASE__ )
parser.add_argument("""--tgt_lang""" , type=SCREAMING_SNAKE_CASE__ , default="""""" , required=SCREAMING_SNAKE_CASE__ )
parser.add_argument("""--eval_beams""" , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ )
parser.add_argument(
"""--val_metric""" , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , choices=["""bleu""", """rouge2""", """loss""", None] )
parser.add_argument("""--eval_max_gen_length""" , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , help="""never generate more than n tokens""" )
parser.add_argument("""--save_top_k""" , type=SCREAMING_SNAKE_CASE__ , default=1 , required=SCREAMING_SNAKE_CASE__ , help="""How many checkpoints to save""" )
parser.add_argument(
"""--early_stopping_patience""" , type=SCREAMING_SNAKE_CASE__ , default=-1 , required=SCREAMING_SNAKE_CASE__ , help=(
"""-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"""
""" val_check_interval will effect it."""
) , )
return parser
class a ( a__ ):
"""simple docstring"""
a : Optional[Any] = '''translation'''
a : List[Any] = ['''loss''']
a : Optional[Any] = ['''bleu''']
a : Dict = '''bleu'''
def __init__( self : Optional[int] , __lowercase : int , **__lowercase : Tuple ) -> Union[str, Any]:
super().__init__(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
__UpperCAmelCase : Any = hparams.src_lang
__UpperCAmelCase : Optional[Any] = hparams.tgt_lang
def UpperCAmelCase ( self : str , __lowercase : str , __lowercase : List[str] ) -> dict:
return calculate_bleu(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def lowerCamelCase__ ( __lowerCamelCase : str , __lowerCamelCase : str=None ):
Path(args.output_dir ).mkdir(exist_ok=_snake_case )
check_output_dir(_snake_case , expected_items=3 )
if model is None:
if "summarization" in args.task:
__UpperCAmelCase : SummarizationModule = SummarizationModule(_snake_case )
else:
__UpperCAmelCase : SummarizationModule = TranslationModule(_snake_case )
__UpperCAmelCase : Tuple = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith("""/tmp""" )
or str(args.output_dir ).startswith("""/var""" )
):
__UpperCAmelCase : int = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
__UpperCAmelCase : Dict = os.environ.get("""WANDB_PROJECT""" , _snake_case )
__UpperCAmelCase : Optional[int] = WandbLogger(name=model.output_dir.name , project=_snake_case )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
__UpperCAmelCase : List[Any] = WandbLogger(name=model.output_dir.name , project=f"""hf_{dataset}""" )
if args.early_stopping_patience >= 0:
__UpperCAmelCase : Union[str, Any] = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : int = args.val_metric == """loss"""
__UpperCAmelCase : pl.Trainer = generic_train(
_snake_case , _snake_case , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , _snake_case ) , early_stopping_callback=_snake_case , logger=_snake_case , )
pickle_save(model.hparams , model.output_dir / """hparams.pkl""" )
if not args.do_predict:
return model
__UpperCAmelCase : Dict = """"""
__UpperCAmelCase : List[Any] = sorted(glob.glob(os.path.join(args.output_dir , """*.ckpt""" ) , recursive=_snake_case ) )
if checkpoints:
__UpperCAmelCase : Optional[Any] = checkpoints[-1]
__UpperCAmelCase : Union[str, Any] = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
a : Optional[int] = pl.Trainer.add_argparse_args(parser)
a : int = SummarizationModule.add_model_specific_args(parser, os.getcwd())
a : List[str] = parser.parse_args()
main(args)
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
UpperCAmelCase__ : List[str] = logging.get_logger(__name__)
@add_end_docstrings(a__ )
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
def __init__(self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
requires_backends(self , """vision""" )
self.check_model_type(SCREAMING_SNAKE_CASE__ )
def __call__(self , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
return super().__call__(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
return {}, {}, {}
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = load_image(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = image.size
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors=self.framework )
return model_inputs
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model(**SCREAMING_SNAKE_CASE__ )
return model_outputs
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = model_outputs.predicted_depth
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="""bicubic""" , align_corners=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = prediction.squeeze().cpu().numpy()
SCREAMING_SNAKE_CASE__ : Any = (output * 2_55 / np.max(SCREAMING_SNAKE_CASE__ )).astype("""uint8""" )
SCREAMING_SNAKE_CASE__ : List[str] = Image.fromarray(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = {}
SCREAMING_SNAKE_CASE__ : Any = predicted_depth
SCREAMING_SNAKE_CASE__ : Dict = depth
return output_dict
import os
import sys
import unittest
UpperCAmelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
UpperCAmelCase__ = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
UpperCAmelCase__ = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class lowerCamelCase__ ( unittest.TestCase):
def __A (self ) -> Any:
_lowercase =get_test_to_tester_mapping(SCREAMING_SNAKE_CASE__ )
_lowercase =get_test_to_tester_mapping(SCREAMING_SNAKE_CASE__ )
_lowercase ={"""BertModelTest""": """BertModelTester"""}
_lowercase ={
"""BlipModelTest""": """BlipModelTester""",
"""BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
"""BlipTextModelTest""": """BlipTextModelTester""",
"""BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
"""BlipVQAModelTest""": """BlipVQAModelTester""",
"""BlipVisionModelTest""": """BlipVisionModelTester""",
}
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def __A (self ) -> Optional[Any]:
_lowercase =get_model_to_test_mapping(SCREAMING_SNAKE_CASE__ )
_lowercase =get_model_to_test_mapping(SCREAMING_SNAKE_CASE__ )
_lowercase ={
"""BertForMaskedLM""": ["""BertModelTest"""],
"""BertForMultipleChoice""": ["""BertModelTest"""],
"""BertForNextSentencePrediction""": ["""BertModelTest"""],
"""BertForPreTraining""": ["""BertModelTest"""],
"""BertForQuestionAnswering""": ["""BertModelTest"""],
"""BertForSequenceClassification""": ["""BertModelTest"""],
"""BertForTokenClassification""": ["""BertModelTest"""],
"""BertLMHeadModel""": ["""BertModelTest"""],
"""BertModel""": ["""BertModelTest"""],
}
_lowercase ={
"""BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
"""BlipModel""": ["""BlipModelTest"""],
"""BlipTextModel""": ["""BlipTextModelTest"""],
"""BlipVisionModel""": ["""BlipVisionModelTest"""],
}
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def __A (self ) -> List[str]:
_lowercase =get_model_to_tester_mapping(SCREAMING_SNAKE_CASE__ )
_lowercase =get_model_to_tester_mapping(SCREAMING_SNAKE_CASE__ )
_lowercase ={
"""BertForMaskedLM""": ["""BertModelTester"""],
"""BertForMultipleChoice""": ["""BertModelTester"""],
"""BertForNextSentencePrediction""": ["""BertModelTester"""],
"""BertForPreTraining""": ["""BertModelTester"""],
"""BertForQuestionAnswering""": ["""BertModelTester"""],
"""BertForSequenceClassification""": ["""BertModelTester"""],
"""BertForTokenClassification""": ["""BertModelTester"""],
"""BertLMHeadModel""": ["""BertModelTester"""],
"""BertModel""": ["""BertModelTester"""],
}
_lowercase ={
"""BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
"""BlipModel""": ["""BlipModelTester"""],
"""BlipTextModel""": ["""BlipTextModelTester"""],
"""BlipVisionModel""": ["""BlipVisionModelTester"""],
}
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase_ (a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = IFPipeline
__UpperCamelCase : Dict = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
__UpperCamelCase : Any = TEXT_TO_IMAGE_BATCH_PARAMS
__UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
return self._get_dummy_components()
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ) -> List[Any]:
"""simple docstring"""
if str(SCREAMING_SNAKE_CASE__ ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE__ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
SCREAMING_SNAKE_CASE__ : Any = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
self._test_save_load_local()
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : Dict = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""" )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
SCREAMING_SNAKE_CASE__ : List[str] = None
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
SCREAMING_SNAKE_CASE__ : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE__ : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
SCREAMING_SNAKE_CASE__ : Optional[Any] = IFInpaintingPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE__ : int = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : int = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__ : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
SCREAMING_SNAKE_CASE__ : Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : List[str] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
SCREAMING_SNAKE_CASE__ : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE__ : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Any = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : Dict = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE__ : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : List[Any] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE__ : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[str] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : List[Any] = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__ : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE__ : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : Dict = output.images[0]
assert image.shape == (2_56, 2_56, 3)
SCREAMING_SNAKE_CASE__ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE__ : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def lowercase_ ( ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 25 | 0 |
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient phi(n) for 2 <= n <= limit, using a prime sieve."""
    # Sieve of Eratosthenes over the odd numbers, then add 2.
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    # Start with phi(n) = n, then apply phi(n) = n * prod(1 - 1/p) over the primes p dividing n.
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
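# Worked example (not part of the original file): solution(8) sums
# phi(2) + ... + phi(8) = 1 + 2 + 2 + 4 + 2 + 6 + 4 = 21.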
if __name__ == "__main__":
print(F"{solution() = }")
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        # Class, method, and variable names are reconstructed from how they are used below.
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 25 | 0 |
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
A = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
A = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def __A ( a_ :int , a_ :int , a_ :Optional[Any]) -> List[Any]:
__a : List[str] = SavedModel()
__a : Dict = []
with open(os.path.join(_snake_case , '''utils''' , '''tf_ops''' , '''onnx.json''')) as f:
__a : Any = json.load(_snake_case)["""opsets"""]
for i in range(1 , opset + 1):
onnx_ops.extend(onnx_opsets[str(_snake_case)])
with open(_snake_case , '''rb''') as f:
saved_model.ParseFromString(f.read())
__a : List[str] = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node)
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def)
# Convert to list, sorted if you want
__a : int = sorted(_snake_case)
__a : Optional[Any] = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(_snake_case)
if strict and len(_snake_case) > 0:
        raise Exception(F"""Found the following incompatible ops for the opset {opset}:\n""" + "\n".join(incompatible_ops))
elif len(_snake_case) > 0:
print(F"""Found the following incompatible ops for the opset {opset}:""")
print(*_snake_case , sep='''\n''')
else:
print(F"""The saved model {saved_model_path} can properly be converted with ONNX.""")
if __name__ == "__main__":
A = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
A = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset) | 160 |
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_abit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
UpperCAmelCase__ : Union[str, Any] = logging.getLogger(__name__)
def lowercase_ ( _snake_case ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = False ,):
SCREAMING_SNAKE_CASE__ : List[Any] = bnb_quantization_config.load_in_abit
SCREAMING_SNAKE_CASE__ : int = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
SCREAMING_SNAKE_CASE__ : int = []
# custom device map
if isinstance(_snake_case ,_snake_case ) and len(device_map.keys() ) > 1:
SCREAMING_SNAKE_CASE__ : Optional[int] = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
SCREAMING_SNAKE_CASE__ : int = get_keys_to_not_convert(_snake_case )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(_snake_case )
SCREAMING_SNAKE_CASE__ : List[Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
SCREAMING_SNAKE_CASE__ : Dict = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_snake_case )
# compatibility with peft
SCREAMING_SNAKE_CASE__ : Any = load_in_abit
SCREAMING_SNAKE_CASE__ : Any = load_in_abit
SCREAMING_SNAKE_CASE__ : Tuple = get_parameter_device(_snake_case )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
SCREAMING_SNAKE_CASE__ : int = replace_with_bnb_layers(_snake_case ,_snake_case ,modules_to_not_convert=_snake_case )
# convert param to the right dtype
SCREAMING_SNAKE_CASE__ : str = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
SCREAMING_SNAKE_CASE__ : Tuple = name.replace(""".weight""" ,"""""" ).replace(""".bias""" ,"""""" )
SCREAMING_SNAKE_CASE__ : Dict = getattr(_snake_case ,_snake_case ,_snake_case )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(_snake_case ):
param.to(_snake_case )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
f'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
SCREAMING_SNAKE_CASE__ : Dict = replace_with_bnb_layers(
_snake_case ,_snake_case ,modules_to_not_convert=_snake_case )
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_quantized_model_device_map(
_snake_case ,_snake_case ,_snake_case ,max_memory=_snake_case ,no_split_module_classes=_snake_case ,)
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
SCREAMING_SNAKE_CASE__ : Tuple = True
SCREAMING_SNAKE_CASE__ : Optional[Any] = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
_snake_case ,_snake_case ,_snake_case ,dtype=bnb_quantization_config.torch_dtype ,offload_folder=_snake_case ,offload_state_dict=_snake_case ,keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules ,offload_abit_bnb=load_in_abit and offload ,)
return dispatch_model(_snake_case ,device_map=_snake_case ,offload_dir=_snake_case )
def lowercase_ ( _snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ,_snake_case=None ):
if device_map is None:
if torch.cuda.is_available():
SCREAMING_SNAKE_CASE__ : int = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(_snake_case ,_snake_case ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
SCREAMING_SNAKE_CASE__ : List[Any] = {}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = special_dtypes
SCREAMING_SNAKE_CASE__ : Optional[Any] = no_split_module_classes
SCREAMING_SNAKE_CASE__ : int = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
SCREAMING_SNAKE_CASE__ : int = get_balanced_memory(
_snake_case ,low_zero=(device_map == """balanced_low_0""") ,max_memory=_snake_case ,**_snake_case ,)
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_memory
SCREAMING_SNAKE_CASE__ : str = infer_auto_device_map(_snake_case ,**_snake_case )
if isinstance(_snake_case ,_snake_case ):
# check if don't have any quantized module on the cpu
SCREAMING_SNAKE_CASE__ : Tuple = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def lowercase_ ( _snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ):
if modules_to_not_convert is None:
SCREAMING_SNAKE_CASE__ : Tuple = []
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = _replace_with_bnb_layers(
_snake_case ,_snake_case ,_snake_case ,_snake_case )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def lowercase_ ( _snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ,):
SCREAMING_SNAKE_CASE__ : Tuple = False
for name, module in model.named_children():
if current_key_name is None:
SCREAMING_SNAKE_CASE__ : Any = []
current_key_name.append(_snake_case )
if isinstance(_snake_case ,nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
SCREAMING_SNAKE_CASE__ : Tuple = """.""".join(_snake_case )
SCREAMING_SNAKE_CASE__ : List[str] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
SCREAMING_SNAKE_CASE__ : List[str] = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
SCREAMING_SNAKE_CASE__ : Tuple = bnb.nn.LinearabitLt(
module.in_features ,module.out_features ,module.bias is not None ,has_fpaa_weights=_snake_case ,threshold=bnb_quantization_config.llm_inta_threshold ,)
elif bnb_quantization_config.load_in_abit:
SCREAMING_SNAKE_CASE__ : Dict = bnb.nn.Linearabit(
module.in_features ,module.out_features ,module.bias is not None ,bnb_quantization_config.bnb_abit_compute_dtype ,compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant ,quant_type=bnb_quantization_config.bnb_abit_quant_type ,)
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
SCREAMING_SNAKE_CASE__ : str = module.weight.data
if module.bias is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = module.bias.data
bnb_module.requires_grad_(_snake_case )
setattr(_snake_case ,_snake_case ,_snake_case )
SCREAMING_SNAKE_CASE__ : List[str] = True
if len(list(module.children() ) ) > 0:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = _replace_with_bnb_layers(
_snake_case ,_snake_case ,_snake_case ,_snake_case )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def lowercase_ ( _snake_case ):
# Create a copy of the model
with init_empty_weights():
SCREAMING_SNAKE_CASE__ : Any = deepcopy(_snake_case ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
SCREAMING_SNAKE_CASE__ : Tuple = find_tied_parameters(_snake_case )
# For compatibility with Accelerate < 0.18
if isinstance(_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Tuple = sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() )
else:
SCREAMING_SNAKE_CASE__ : List[str] = sum(_snake_case ,[] )
SCREAMING_SNAKE_CASE__ : Dict = len(_snake_case ) > 0
# Check if it is a base model
SCREAMING_SNAKE_CASE__ : Optional[int] = False
if hasattr(_snake_case ,"""base_model_prefix""" ):
SCREAMING_SNAKE_CASE__ : Dict = not hasattr(_snake_case ,model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
SCREAMING_SNAKE_CASE__ : Optional[Any] = list(model.named_children() )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [list_modules[-1][0]]
# add last module together with tied weights
SCREAMING_SNAKE_CASE__ : List[str] = set(_snake_case ) - set(_snake_case )
SCREAMING_SNAKE_CASE__ : Tuple = list(set(_snake_case ) ) + list(_snake_case )
# remove ".weight" from the keys
SCREAMING_SNAKE_CASE__ : Tuple = [""".weight""", """.bias"""]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = name.replace(_snake_case ,"""""" )
filtered_module_names.append(_snake_case )
return filtered_module_names
def lowercase_ ( _snake_case ):
for m in model.modules():
if isinstance(_snake_case ,bnb.nn.Linearabit ):
return True
return False
def lowercase_ ( _snake_case ):
return next(parameter.parameters() ).device
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ):
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(_snake_case ,_snake_case ,0 ,dtype=_snake_case ,value=_snake_case )
SCREAMING_SNAKE_CASE__ : str = param_name
SCREAMING_SNAKE_CASE__ : Dict = model
if "." in tensor_name:
SCREAMING_SNAKE_CASE__ : Any = tensor_name.split(""".""" )
for split in splits[:-1]:
SCREAMING_SNAKE_CASE__ : List[str] = getattr(_snake_case ,_snake_case )
if new_module is None:
raise ValueError(f'''{module} has no attribute {split}.''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = new_module
SCREAMING_SNAKE_CASE__ : List[Any] = splits[-1]
# offload weights
SCREAMING_SNAKE_CASE__ : List[Any] = False
offload_weight(module._parameters[tensor_name] ,_snake_case ,_snake_case ,index=_snake_case )
if hasattr(module._parameters[tensor_name] ,"""SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB ,param_name.replace("""weight""" ,"""SCB""" ) ,_snake_case ,index=_snake_case ,)
else:
offload_weight(_snake_case ,_snake_case ,_snake_case ,index=_snake_case )
offload_weight(_snake_case ,param_name.replace("""weight""" ,"""SCB""" ) ,_snake_case ,index=_snake_case )
set_module_tensor_to_device(_snake_case ,_snake_case ,"""meta""" ,dtype=_snake_case ,value=torch.empty(*param.size() ) )
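# The replacement pass above recursively walks named_children() and swaps nn.Linear
# layers for quantized ones unless the module path is skipped. A library-agnostic sketch
# of that traversal; QuantLinear is a hypothetical stand-in, not the bitsandbytes class
# used above.
import torch.nn as nn
class QuantLinear(nn.Linear):
    """Placeholder standing in for a quantized linear layer."""
def replace_linears(model: nn.Module, skip: set, prefix: str = "") -> nn.Module:
    for name, child in model.named_children():
        path = f"{prefix}.{name}" if prefix else name
        if isinstance(child, nn.Linear) and path not in skip:
            new_layer = QuantLinear(child.in_features, child.out_features, child.bias is not None)
            new_layer.weight.data = child.weight.data
            if child.bias is not None:
                new_layer.bias.data = child.bias.data
            setattr(model, name, new_layer)
        else:
            replace_linears(child, skip, path)
    return model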
| 25 | 0 |
import argparse
import os
import re
a : Any = 'src/transformers'
# Pattern that looks at the indentation in a line.
a : Union[str, Any] = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
a : Union[str, Any] = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
a : Optional[Any] = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
a : List[str] = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
a : Optional[Any] = re.compile(r'\[([^\]]+)\]')
def lowerCAmelCase_ (lowerCAmelCase__: int ):
"""simple docstring"""
UpperCAmelCase_: List[Any] = _re_indent.search(_snake_case )
return "" if search is None else search.groups()[0]
def lowerCAmelCase_ (lowerCAmelCase__: List[str] , lowerCAmelCase__: Optional[int]="" , lowerCAmelCase__: List[str]=None , lowerCAmelCase__: Tuple=None ):
"""simple docstring"""
UpperCAmelCase_: List[str] = 0
UpperCAmelCase_: Dict = code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(_snake_case ):
index += 1
UpperCAmelCase_: Optional[Any] = ["""\n""".join(lines[:index] )]
else:
UpperCAmelCase_: List[Any] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
UpperCAmelCase_: Any = [lines[index]]
index += 1
while index < len(_snake_case ) and (end_prompt is None or not lines[index].startswith(_snake_case )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_snake_case ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(_snake_case ) )
if index < len(_snake_case ) - 1:
UpperCAmelCase_: Dict = [lines[index + 1]]
index += 1
else:
UpperCAmelCase_: int = []
else:
blocks.append("""\n""".join(_snake_case ) )
UpperCAmelCase_: List[Any] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_snake_case ) > 0:
blocks.append("""\n""".join(_snake_case ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_snake_case ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def lowerCAmelCase_ (lowerCAmelCase__: Any ):
"""simple docstring"""
def _inner(lowerCAmelCase__: Optional[int] ):
return key(_snake_case ).lower().replace("""_""" , """""" )
return _inner
def lowerCAmelCase_ (lowerCAmelCase__: int , lowerCAmelCase__: Tuple=None ):
"""simple docstring"""
def noop(lowerCAmelCase__: Dict ):
return x
if key is None:
UpperCAmelCase_: str = noop
# Constants are all uppercase, they go first.
UpperCAmelCase_: Dict = [obj for obj in objects if key(_snake_case ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
UpperCAmelCase_: List[str] = [obj for obj in objects if key(_snake_case )[0].isupper() and not key(_snake_case ).isupper()]
# Functions begin with a lowercase, they go last.
UpperCAmelCase_: Optional[int] = [obj for obj in objects if not key(_snake_case )[0].isupper()]
UpperCAmelCase_: str = ignore_underscore(_snake_case )
return sorted(_snake_case , key=_snake_case ) + sorted(_snake_case , key=_snake_case ) + sorted(_snake_case , key=_snake_case )
def lowerCAmelCase_ (lowerCAmelCase__: Union[str, Any] ):
"""simple docstring"""
def _replace(lowerCAmelCase__: Optional[Any] ):
UpperCAmelCase_: List[str] = match.groups()[0]
if "," not in imports:
return F'[{imports}]'
UpperCAmelCase_: int = [part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
UpperCAmelCase_: Optional[Any] = keys[:-1]
return "[" + ", ".join([F'"{k}"' for k in sort_objects(_snake_case )] ) + "]"
UpperCAmelCase_: Optional[Any] = import_statement.split("""\n""" )
if len(_snake_case ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
UpperCAmelCase_: Dict = 2 if lines[1].strip() == """[""" else 1
UpperCAmelCase_: Dict = [(i, _re_strip_line.search(_snake_case ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
UpperCAmelCase_: Optional[Any] = sort_objects(_snake_case , key=lambda lowerCAmelCase__ : x[1] )
UpperCAmelCase_: Optional[Any] = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(_snake_case ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
UpperCAmelCase_: List[Any] = _re_bracket_content.sub(_replace , lines[1] )
else:
UpperCAmelCase_: Dict = [part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
UpperCAmelCase_: Optional[int] = keys[:-1]
UpperCAmelCase_: str = get_indent(lines[1] ) + """, """.join([F'"{k}"' for k in sort_objects(_snake_case )] )
return "\n".join(_snake_case )
else:
# Finally we have to deal with imports fitting on one line
UpperCAmelCase_: Dict = _re_bracket_content.sub(_replace , _snake_case )
return import_statement
def lowerCAmelCase_ (lowerCAmelCase__: Optional[int] , lowerCAmelCase__: str=True ):
"""simple docstring"""
with open(_snake_case , encoding="""utf-8""" ) as f:
UpperCAmelCase_: List[Any] = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
UpperCAmelCase_: Optional[int] = split_code_in_indented_blocks(
_snake_case , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" )
# We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(_snake_case ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
UpperCAmelCase_: Optional[Any] = main_blocks[block_idx]
UpperCAmelCase_: Dict = block.split("""\n""" )
# Get to the start of the imports.
UpperCAmelCase_: int = 0
while line_idx < len(_snake_case ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
UpperCAmelCase_: Dict = len(_snake_case )
else:
line_idx += 1
if line_idx >= len(_snake_case ):
continue
# Ignore beginning and last line: they don't contain anything.
UpperCAmelCase_: Dict = """\n""".join(block_lines[line_idx:-1] )
UpperCAmelCase_: Union[str, Any] = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
UpperCAmelCase_: Dict = split_code_in_indented_blocks(_snake_case , indent_level=_snake_case )
# We have two categories of import key: list or _import_structure[key].append/extend
UpperCAmelCase_: Union[str, Any] = _re_direct_key if """_import_structure = {""" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
UpperCAmelCase_: Optional[Any] = [(pattern.search(_snake_case ).groups()[0] if pattern.search(_snake_case ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
UpperCAmelCase_: Tuple = [(i, key) for i, key in enumerate(_snake_case ) if key is not None]
UpperCAmelCase_: List[str] = [x[0] for x in sorted(_snake_case , key=lambda lowerCAmelCase__ : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
UpperCAmelCase_: Any = 0
UpperCAmelCase_: Union[str, Any] = []
for i in range(len(_snake_case ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
UpperCAmelCase_: Dict = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(_snake_case )
count += 1
# And we put our main block back together with its first and last line.
UpperCAmelCase_: Dict = """\n""".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(_snake_case ):
if check_only:
return True
else:
print(F'Overwriting {file}.' )
with open(_snake_case , """w""" , encoding="""utf-8""" ) as f:
f.write("""\n""".join(_snake_case ) )
def lowerCAmelCase_ (lowerCAmelCase__: str=True ):
"""simple docstring"""
UpperCAmelCase_: int = []
for root, _, files in os.walk(_snake_case ):
if "__init__.py" in files:
UpperCAmelCase_: Union[str, Any] = sort_imports(os.path.join(_snake_case , """__init__.py""" ) , check_only=_snake_case )
if result:
UpperCAmelCase_: Dict = [os.path.join(_snake_case , """__init__.py""" )]
if len(_snake_case ) > 0:
raise ValueError(F'Would overwrite {len(_snake_case )} files, run `make style`.' )
if __name__ == "__main__":
a : str = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
a : List[str] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
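# Hedged illustration of the ordering rule enforced above: constants (ALL_CAPS) first,
# then classes (Capitalized), then functions (lowercase), each bucket sorted
# case-insensitively with underscores ignored.
def sort_objects_demo(objects):
    def key(name):
        return name.lower().replace("_", "")
    constants = sorted((o for o in objects if o.isupper()), key=key)
    classes = sorted((o for o in objects if o[0].isupper() and not o.isupper()), key=key)
    functions = sorted((o for o in objects if not o[0].isupper()), key=key)
    return constants + classes + functions
print(sort_objects_demo(["load_model", "MODEL_MAPPING", "BertModel", "AutoConfig"]))
# ['MODEL_MAPPING', 'AutoConfig', 'BertModel', 'load_model']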
| 147 |
"""simple docstring"""
def lowercase_ ( _snake_case ,_snake_case ):
if not (isinstance(_snake_case ,_snake_case ) and isinstance(_snake_case ,_snake_case )):
raise ValueError("""longest_common_substring() takes two strings for inputs""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(_snake_case )
SCREAMING_SNAKE_CASE__ : int = len(_snake_case )
SCREAMING_SNAKE_CASE__ : Dict = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )]
SCREAMING_SNAKE_CASE__ : List[Any] = 0
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
for i in range(1 ,texta_length + 1 ):
for j in range(1 ,texta_length + 1 ):
if texta[i - 1] == texta[j - 1]:
SCREAMING_SNAKE_CASE__ : int = 1 + dp[i - 1][j - 1]
if dp[i][j] > ans_length:
SCREAMING_SNAKE_CASE__ : List[Any] = i
SCREAMING_SNAKE_CASE__ : List[str] = dp[i][j]
return texta[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
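# A compact, plainly named version of the DP above for reference: the same
# O(len(a) * len(b)) table, tracking the best length and its end index in `a`.
def longest_common_substring_demo(a: str, b: str) -> str:
    dp = [[0] * (len(b) + 1) for _ in range(len(a) + 1)]
    best_len, best_end = 0, 0
    for i in range(1, len(a) + 1):
        for j in range(1, len(b) + 1):
            if a[i - 1] == b[j - 1]:
                dp[i][j] = dp[i - 1][j - 1] + 1
                if dp[i][j] > best_len:
                    best_len, best_end = dp[i][j], i
    return a[best_end - best_len : best_end]
assert longest_common_substring_demo("abcdef", "xabded") == "ab"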
| 25 | 0 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch """
"""helper utility that will spawn up """
"""multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" ,type=_snake_case ,default=1 ,help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" ,type=_snake_case ,help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) ,)
# rest from the training program
parser.add_argument("""training_script_args""" ,nargs=_snake_case )
return parser.parse_args()
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = parse_args()
# Import training_script as a module.
_UpperCAmelCase = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
_UpperCAmelCase = script_fpath.stem
_UpperCAmelCase = importlib.import_module(_snake_case )
# Patch sys.argv
_UpperCAmelCase = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 289 |
"""simple docstring"""
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
def lowercase_ ( _snake_case ,_snake_case ,_snake_case = None ):
SCREAMING_SNAKE_CASE__ : Dict = tesseract_config if tesseract_config is not None else """"""
# apply OCR
SCREAMING_SNAKE_CASE__ : List[Any] = to_pil_image(_snake_case )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = pil_image.size
SCREAMING_SNAKE_CASE__ : Tuple = pytesseract.image_to_data(_snake_case ,lang=_snake_case ,output_type="""dict""" ,config=_snake_case )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]
# filter empty words and corresponding coordinates
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [idx for idx, word in enumerate(_snake_case ) if not word.strip()]
SCREAMING_SNAKE_CASE__ : Dict = [word for idx, word in enumerate(_snake_case ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : List[str] = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : Tuple = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : int = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : Tuple = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
SCREAMING_SNAKE_CASE__ : List[Any] = []
for x, y, w, h in zip(_snake_case ,_snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = [x, y, x + w, y + h]
actual_boxes.append(_snake_case )
# finally, normalize the bounding boxes
SCREAMING_SNAKE_CASE__ : List[str] = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(_snake_case ,_snake_case ,_snake_case ) )
assert len(_snake_case ) == len(_snake_case ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = ['''pixel_values''']
def __init__(self , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "" , **SCREAMING_SNAKE_CASE__ , ) -> None:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = size if size is not None else {"""height""": 2_24, """width""": 2_24}
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = do_resize
SCREAMING_SNAKE_CASE__ : Any = size
SCREAMING_SNAKE_CASE__ : List[Any] = resample
SCREAMING_SNAKE_CASE__ : Dict = apply_ocr
SCREAMING_SNAKE_CASE__ : List[str] = ocr_lang
SCREAMING_SNAKE_CASE__ : Tuple = tesseract_config
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = get_size_dict(SCREAMING_SNAKE_CASE__ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
SCREAMING_SNAKE_CASE__ : Any = (size["""height"""], size["""width"""])
return resize(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ , ) -> PIL.Image.Image:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : Optional[Any] = apply_ocr if apply_ocr is not None else self.apply_ocr
SCREAMING_SNAKE_CASE__ : Optional[Any] = ocr_lang if ocr_lang is not None else self.ocr_lang
SCREAMING_SNAKE_CASE__ : Dict = tesseract_config if tesseract_config is not None else self.tesseract_config
SCREAMING_SNAKE_CASE__ : Optional[int] = make_list_of_images(SCREAMING_SNAKE_CASE__ )
if not valid_images(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for image in images]
if apply_ocr:
requires_backends(self , """pytesseract""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
SCREAMING_SNAKE_CASE__ : Dict = []
for image in images:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = apply_tesseract(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
words_batch.append(SCREAMING_SNAKE_CASE__ )
boxes_batch.append(SCREAMING_SNAKE_CASE__ )
if do_resize:
SCREAMING_SNAKE_CASE__ : Optional[int] = [self.resize(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [flip_channel_order(SCREAMING_SNAKE_CASE__ ) for image in images]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images]
SCREAMING_SNAKE_CASE__ : Optional[Any] = BatchFeature(data={"""pixel_values""": images} , tensor_type=SCREAMING_SNAKE_CASE__ )
if apply_ocr:
SCREAMING_SNAKE_CASE__ : List[Any] = words_batch
SCREAMING_SNAKE_CASE__ : List[str] = boxes_batch
return data
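# Hedged arithmetic sketch of the box normalization used above: pixel boxes
# (left, top, right, bottom) are rescaled to a 0-1000 coordinate system so that the
# word boxes become independent of the image resolution.
def normalize_box_demo(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
print(normalize_box_demo([50, 100, 200, 300], width=400, height=600))
# [125, 166, 500, 500]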
| 25 | 0 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def lowerCAmelCase_ ( snake_case_ ):
return (data["data"], data["target"])
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : int = XGBClassifier()
classifier.fit(_snake_case,_snake_case )
return classifier
def lowerCAmelCase_ ( ):
_A : Dict = load_iris()
_A : Dict = data_handling(_snake_case )
_A : str = train_test_split(
_snake_case,_snake_case,test_size=0.25 )
_A : int = iris["""target_names"""]
# Create an XGBoost Classifier from the training data
_A : Optional[int] = xgboost(_snake_case,_snake_case )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
_snake_case,_snake_case,_snake_case,display_labels=_snake_case,cmap="""Blues""",normalize="""true""",)
plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 26 |
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class lowerCAmelCase_ :
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = list(poly_a or [0] )[:]
SCREAMING_SNAKE_CASE__ : Tuple = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
SCREAMING_SNAKE_CASE__ : int = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
SCREAMING_SNAKE_CASE__ : List[str] = len(self.polyB )
# Add 0 to make lengths equal a power of 2
SCREAMING_SNAKE_CASE__ : Optional[int] = int(
            2 ** np.ceil(np.log2(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
SCREAMING_SNAKE_CASE__ : List[str] = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
SCREAMING_SNAKE_CASE__ : Tuple = self.__multiply()
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB]
# Corner case
if len(SCREAMING_SNAKE_CASE__ ) <= 1:
return dft[0]
#
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.c_max_length // 2
while next_ncol > 0:
SCREAMING_SNAKE_CASE__ : Any = [[] for i in range(SCREAMING_SNAKE_CASE__ )]
SCREAMING_SNAKE_CASE__ : Tuple = self.root**next_ncol
# First half of next step
SCREAMING_SNAKE_CASE__ : str = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(SCREAMING_SNAKE_CASE__ ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
SCREAMING_SNAKE_CASE__ : int = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(SCREAMING_SNAKE_CASE__ ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
SCREAMING_SNAKE_CASE__ : Union[str, Any] = new_dft
SCREAMING_SNAKE_CASE__ : Tuple = next_ncol // 2
return dft[0]
def __magic_name__ (self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.__dft("""A""" )
SCREAMING_SNAKE_CASE__ : Dict = self.__dft("""B""" )
SCREAMING_SNAKE_CASE__ : List[Any] = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
SCREAMING_SNAKE_CASE__ : Optional[Any] = 2
while next_ncol <= self.c_max_length:
SCREAMING_SNAKE_CASE__ : List[str] = [[] for i in range(SCREAMING_SNAKE_CASE__ )]
SCREAMING_SNAKE_CASE__ : Tuple = self.root ** (next_ncol // 2)
SCREAMING_SNAKE_CASE__ : Any = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
SCREAMING_SNAKE_CASE__ : Optional[Any] = new_inverse_c
next_ncol *= 2
# Unpack
SCREAMING_SNAKE_CASE__ : Optional[Any] = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__(self ) -> List[str]:
"""simple docstring"""
        SCREAMING_SNAKE_CASE__ : List[str] = """A = """ + """ + """.join(
            F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyA[: self.len_A] ) )
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = """B = """ + """ + """.join(
            F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyB[: self.len_B] ) )
        SCREAMING_SNAKE_CASE__ : int = """A*B = """ + """ + """.join(
            F'''{coef}*x^{i}''' for i, coef in enumerate(self.product ) )
return F'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
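# Independent cross-check of the product above with NumPy: multiplying polynomials is a
# linear convolution of their coefficient lists, which can also be computed via the FFT.
import numpy as np
a = [1, 2, 3]  # 1 + 2x + 3x^2
b = [4, 5]     # 4 + 5x
print(np.convolve(a, b))  # [ 4 13 22 15]  ->  4 + 13x + 22x^2 + 15x^3
n = len(a) + len(b) - 1
print(np.round(np.fft.irfft(np.fft.rfft(a, n) * np.fft.rfft(b, n), n)))  # same coefficients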
| 25 | 0 |
from __future__ import annotations
import queue
class _lowercase :
"""simple docstring"""
def __init__( self : List[str] , __lowerCamelCase : Tuple ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = data
lowerCamelCase__ : Dict = None
lowerCamelCase__ : Dict = None
def lowercase_ ( ):
"""simple docstring"""
print("\n********Press N to stop entering at any point of time********\n" )
lowerCamelCase__ : Union[str, Any] = input("Enter the value of the root node: " ).strip().lower()
lowerCamelCase__ : queue.Queue = queue.Queue()
lowerCamelCase__ : int = TreeNode(int(_snake_case ) )
q.put(_snake_case )
while not q.empty():
lowerCamelCase__ : Dict = q.get()
lowerCamelCase__ : Any = F"Enter the left node of {node_found.data}: "
lowerCamelCase__ : List[Any] = input(_snake_case ).strip().lower() or """n"""
if check == "n":
return tree_node
lowerCamelCase__ : Union[str, Any] = TreeNode(int(_snake_case ) )
lowerCamelCase__ : List[Any] = left_node
q.put(_snake_case )
lowerCamelCase__ : int = F"Enter the right node of {node_found.data}: "
lowerCamelCase__ : Union[str, Any] = input(_snake_case ).strip().lower() or """n"""
if check == "n":
return tree_node
lowerCamelCase__ : Any = TreeNode(int(_snake_case ) )
lowerCamelCase__ : str = right_node
q.put(_snake_case )
raise
def lowercase_ ( _A : str ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or not node:
return
print(node.data , end="," )
pre_order(node.left )
pre_order(node.right )
def lowercase_ ( _A : str ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or not node:
return
in_order(node.left )
print(node.data , end="," )
in_order(node.right )
def lowercase_ ( _A : Optional[int] ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end="," )
def lowercase_ ( _A : Dict ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or not node:
return
lowerCamelCase__ : queue.Queue = queue.Queue()
q.put(_snake_case )
while not q.empty():
lowerCamelCase__ : int = q.get()
print(node_dequeued.data , end="," )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def lowercase_ ( _A : Any ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or not node:
return
lowerCamelCase__ : queue.Queue = queue.Queue()
q.put(_snake_case )
while not q.empty():
lowerCamelCase__ : int = []
while not q.empty():
lowerCamelCase__ : List[Any] = q.get()
print(node_dequeued.data , end="," )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(_snake_case )
def lowercase_ ( _A : Any ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or not node:
return
lowerCamelCase__ : list[TreeNode] = []
lowerCamelCase__ : str = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end="," )
stack.append(_snake_case )
lowerCamelCase__ : Optional[Any] = n.left
# end of while means current node doesn't have left child
lowerCamelCase__ : Tuple = stack.pop()
# start to traverse its right child
lowerCamelCase__ : Dict = n.right
def lowercase_ ( _A : Optional[int] ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or not node:
return
lowerCamelCase__ : list[TreeNode] = []
lowerCamelCase__ : Dict = node
while n or stack:
while n:
stack.append(_snake_case )
lowerCamelCase__ : str = n.left
lowerCamelCase__ : int = stack.pop()
print(n.data , end="," )
lowerCamelCase__ : Union[str, Any] = n.right
def lowercase_ ( _A : Dict ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or not node:
return
lowerCamelCase__ : Tuple = [], []
lowerCamelCase__ : Union[str, Any] = node
stacka.append(_snake_case )
while stacka: # to find the reversed order of post order, store it in stack2
lowerCamelCase__ : Optional[Any] = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(_snake_case )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end="," )
def lowercase_ ( _A : List[str] = "" , _A : Any=50 , _A : List[Any]="*" ):
"""simple docstring"""
if not s:
return "\n" + width * char
lowerCamelCase__ : List[str] = divmod(width - len(_snake_case ) - 2 , 2 )
return F"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
A : TreeNode = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 50 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
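# Self-contained, non-interactive sketch: the builder above reads nodes from stdin, but
# a small tree can be wired up by hand to exercise the same traversal order. DemoNode and
# pre_order_demo are illustrative names, independent of the classes defined above.
from dataclasses import dataclass
from typing import Optional
@dataclass
class DemoNode:
    data: int
    left: Optional["DemoNode"] = None
    right: Optional["DemoNode"] = None
def pre_order_demo(n: Optional[DemoNode]) -> None:
    if n is None:
        return
    print(n.data, end=",")
    pre_order_demo(n.left)
    pre_order_demo(n.right)
root = DemoNode(1, DemoNode(2), DemoNode(3))
pre_order_demo(root)  # prints 1,2,3,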
| 184 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch """
"""helper utility that will spawn up """
"""multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" ,type=_snake_case ,default=1 ,help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" ,type=_snake_case ,help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) ,)
# rest from the training program
parser.add_argument("""training_script_args""" ,nargs=_snake_case )
return parser.parse_args()
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : int = parse_args()
# Import training_script as a module.
SCREAMING_SNAKE_CASE__ : Dict = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
SCREAMING_SNAKE_CASE__ : int = script_fpath.stem
SCREAMING_SNAKE_CASE__ : Optional[Any] = importlib.import_module(_snake_case )
# Patch sys.argv
SCREAMING_SNAKE_CASE__ : str = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 25 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
_a : Any= logging.get_logger(__name__)
_a : List[str]= {'vocab_file': 'vocab.txt'}
_a : List[Any]= {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
_a : Optional[Any]= {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
_a : str= {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class UpperCamelCase ( a__ ):
UpperCAmelCase : Tuple = VOCAB_FILES_NAMES
UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase : List[Any] = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase : List[Any] = ConvBertTokenizer
def __init__(self : str , _A : Optional[Any]=None , _A : Tuple=None , _A : int=True , _A : Optional[Any]="[UNK]" , _A : Optional[Any]="[SEP]" , _A : Tuple="[PAD]" , _A : Any="[CLS]" , _A : Tuple="[MASK]" , _A : int=True , _A : List[str]=None , **_A : str , ) -> Any:
super().__init__(
SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
__snake_case : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get('lowercase' , SCREAMING_SNAKE_CASE__) != do_lower_case
or normalizer_state.get('strip_accents' , SCREAMING_SNAKE_CASE__) != strip_accents
or normalizer_state.get('handle_chinese_chars' , SCREAMING_SNAKE_CASE__) != tokenize_chinese_chars
):
__snake_case : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE__ , normalizer_state.pop('type'))
__snake_case : Any = do_lower_case
__snake_case : Union[str, Any] = strip_accents
__snake_case : Any = tokenize_chinese_chars
__snake_case : Any = normalizer_class(**SCREAMING_SNAKE_CASE__)
__snake_case : Optional[Any] = do_lower_case
def _lowercase (self : Optional[int] , _A : Optional[int] , _A : List[Any]=None) -> Optional[int]:
__snake_case : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowercase (self : List[Any] , _A : Tuple , _A : int = None) -> List[int]:
__snake_case : List[Any] = [self.sep_token_id]
__snake_case : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def _lowercase (self : Optional[Any] , _A : Optional[int] , _A : List[str] = None) -> Tuple[str]:
__snake_case : str = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__)
return tuple(SCREAMING_SNAKE_CASE__)
| 172 |
"""simple docstring"""
def lowercase_ ( input_a ,input_b ):
    return 1 if input_a == input_b else 0
def lowercase_ ( ):
assert xnor_gate(0 ,0 ) == 1
assert xnor_gate(0 ,1 ) == 0
assert xnor_gate(1 ,0 ) == 0
assert xnor_gate(1 ,1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
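# Self-contained reference with the full truth table (the function above keeps the
# corpus' placeholder name, so the calls below it cannot resolve it as written).
def xnor_gate(input_1: int, input_2: int) -> int:
    """XNOR is 1 exactly when both inputs are equal."""
    return 1 if input_1 == input_2 else 0
for a in (0, 1):
    for b in (0, 1):
        print(a, b, xnor_gate(a, b))  # 0 0 1 | 0 1 0 | 1 0 0 | 1 1 1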
| 25 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case : int = logging.get_logger(__name__)
_snake_case : Optional[Any] = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class _UpperCAmelCase ( a__ , a__ ):
UpperCamelCase = '''nat'''
UpperCamelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self :int , __UpperCamelCase :List[str]=4 , __UpperCamelCase :Optional[int]=3 , __UpperCamelCase :Dict=64 , __UpperCamelCase :List[str]=[3, 4, 6, 5] , __UpperCamelCase :Optional[Any]=[2, 4, 8, 16] , __UpperCamelCase :int=7 , __UpperCamelCase :Union[str, Any]=3.0 , __UpperCamelCase :List[str]=True , __UpperCamelCase :Optional[int]=0.0 , __UpperCamelCase :Optional[Any]=0.0 , __UpperCamelCase :Optional[int]=0.1 , __UpperCamelCase :Tuple="gelu" , __UpperCamelCase :str=0.02 , __UpperCamelCase :Optional[int]=1e-5 , __UpperCamelCase :List[Any]=0.0 , __UpperCamelCase :int=None , __UpperCamelCase :Union[str, Any]=None , **__UpperCamelCase :Optional[int] , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
A = patch_size
A = num_channels
A = embed_dim
A = depths
A = len(SCREAMING_SNAKE_CASE__ )
A = num_heads
A = kernel_size
A = mlp_ratio
A = qkv_bias
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = drop_path_rate
A = hidden_act
A = layer_norm_eps
A = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
A = int(embed_dim * 2 ** (len(SCREAMING_SNAKE_CASE__ ) - 1) )
A = layer_scale_init_value
A = ["""stem"""] + [f"stage{idx}" for idx in range(1 , len(SCREAMING_SNAKE_CASE__ ) + 1 )]
A = get_aligned_output_features_output_indices(
out_features=SCREAMING_SNAKE_CASE__ , out_indices=SCREAMING_SNAKE_CASE__ , stage_names=self.stage_names )
| 292 |
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
UpperCAmelCase__ : Optional[int] = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
UpperCAmelCase__ : List[Any] = logging.WARNING
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.getenv("""DATASETS_VERBOSITY""" ,_snake_case )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f'''Unknown option DATASETS_VERBOSITY={env_level_str}, '''
f'''has to be one of: { ', '.join(log_levels.keys() ) }''' )
return _default_log_level
def lowercase_ ( ):
return __name__.split(""".""" )[0]
def lowercase_ ( ):
return logging.getLogger(_get_library_name() )
def lowercase_ ( ):
# Apply our default configuration to the library root logger.
SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def lowercase_ ( _snake_case = None ):
if name is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_name()
return logging.getLogger(_snake_case )
def lowercase_ ( ):
return _get_library_root_logger().getEffectiveLevel()
def lowercase_ ( _snake_case ):
_get_library_root_logger().setLevel(_snake_case )
def lowercase_ ( ):
return set_verbosity(_snake_case )
def lowercase_ ( ):
return set_verbosity(_snake_case )
def lowercase_ ( ):
return set_verbosity(_snake_case )
def lowercase_ ( ):
return set_verbosity(_snake_case )
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Tuple = False
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : str = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class lowerCAmelCase_ :
"""simple docstring"""
def __init__(self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> int: # pylint: disable=unused-argument
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = args[0] if args else None
def __iter__(self ) -> int:
"""simple docstring"""
return iter(self._iterator )
def __getattr__(self , SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
def empty_fn(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__(self ) -> Dict:
"""simple docstring"""
return self
def __exit__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
return
UpperCAmelCase__ : str = True
class lowerCAmelCase_ :
"""simple docstring"""
def __call__(self , *SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , **SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
else:
return EmptyTqdm(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
UpperCAmelCase__ : Tuple = _tqdm_cls()
def lowercase_ ( ):
global _tqdm_active
return bool(_tqdm_active )
def lowercase_ ( ):
    # Enable tqdm progress bars for the library.
    global _tqdm_active
    _tqdm_active = True
def lowercase_ ( ):
    # Disable tqdm progress bars for the library.
    global _tqdm_active
    _tqdm_active = False
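# Illustrative sketch of the pattern the module above implements: a per-library
# logger whose level is read from an environment variable or set
# programmatically. The library name and environment variable are hypothetical
# stand-ins, not the real ones.
import logging
import os
_EXAMPLE_LEVELS = {"debug": logging.DEBUG, "info": logging.INFO, "warning": logging.WARNING, "error": logging.ERROR}
def get_example_logger(name: str = "my_library") -> logging.Logger:
    logger = logging.getLogger(name)
    env_level = os.getenv("MY_LIBRARY_VERBOSITY", "").lower()
    # Fall back to WARNING when the env var is unset or unknown.
    logger.setLevel(_EXAMPLE_LEVELS.get(env_level, logging.WARNING))
    return logger
if __name__ == "__main__":
    logging.basicConfig()
    example_logger = get_example_logger()
    example_logger.warning("visible at the default WARNING level")
    example_logger.setLevel(logging.DEBUG)
    example_logger.debug("visible after lowering the level to DEBUG")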
| 25 | 0 |
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class _a ( unittest.TestCase):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self: List[str] ):
'''simple docstring'''
UpperCamelCase__: List[Any] = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base" )
UpperCamelCase__: str = AutoTokenizer.from_pretrained("xlm-roberta-base" )
UpperCamelCase__: int = """The dog is cute and lives in the garden house"""
UpperCamelCase__: Any = jnp.array([tokenizer.encode(SCREAMING_SNAKE_CASE__ )] )
UpperCamelCase__: Dict = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
UpperCamelCase__: Optional[int] = jnp.array(
[[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
UpperCamelCase__: Optional[Any] = model(SCREAMING_SNAKE_CASE__ )["""last_hidden_state"""]
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) )
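# Illustrative sketch of the slice-comparison pattern used by the test above:
# compare a small slice of a large output tensor against hard-coded reference
# values within a tolerance. The helper and the toy arrays are hypothetical.
import numpy as np
def matches_expected_slice(output: np.ndarray, expected_slice: np.ndarray, atol: float = 1e-3) -> bool:
    # Mirrors `output[:, :, -1]` above: only the last hidden-dimension element is compared.
    return bool(np.allclose(output[:, :, -1], expected_slice, atol=atol))
if __name__ == "__main__":
    toy_output = np.zeros((1, 12, 768))
    toy_expected = np.zeros((1, 12))
    print(matches_expected_slice(toy_output, toy_expected))  # True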
| 149 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : Optional[int] = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : int = '''yolos'''
def __init__(self , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=[5_12, 8_64] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=1_00 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , **SCREAMING_SNAKE_CASE__ , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE__ : int = num_hidden_layers
SCREAMING_SNAKE_CASE__ : str = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[str] = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE__ : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE__ : List[str] = image_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE__ : List[str] = num_channels
SCREAMING_SNAKE_CASE__ : List[str] = qkv_bias
SCREAMING_SNAKE_CASE__ : Optional[int] = num_detection_tokens
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_mid_position_embeddings
SCREAMING_SNAKE_CASE__ : List[str] = auxiliary_loss
# Hungarian matcher
SCREAMING_SNAKE_CASE__ : Optional[Any] = class_cost
SCREAMING_SNAKE_CASE__ : List[str] = bbox_cost
SCREAMING_SNAKE_CASE__ : List[Any] = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE__ : Optional[Any] = bbox_loss_coefficient
SCREAMING_SNAKE_CASE__ : List[str] = giou_loss_coefficient
SCREAMING_SNAKE_CASE__ : int = eos_coefficient
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Dict = version.parse('''1.11''' )
@property
def __magic_name__ (self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __magic_name__ (self ) -> float:
"""simple docstring"""
return 1E-4
@property
def __magic_name__ (self ) -> int:
"""simple docstring"""
return 12
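# Illustrative sketch of how a dynamic-axes mapping like the one returned by
# the ONNX config above is typically handed to `torch.onnx.export`. The tiny
# module and file name are hypothetical stand-ins, not the YOLOS model itself.
import torch
class TinyVisionModel(torch.nn.Module):
    def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
        # Collapse the spatial dimensions so the export has a single small output.
        return pixel_values.mean(dim=(2, 3))
if __name__ == "__main__":
    dynamic_axes = {"pixel_values": {0: "batch", 1: "num_channels", 2: "height", 3: "width"}}
    torch.onnx.export(
        TinyVisionModel().eval(),
        (torch.randn(1, 3, 64, 64),),
        "tiny_vision_model.onnx",
        input_names=["pixel_values"],
        output_names=["pooled"],
        dynamic_axes=dynamic_axes,
        opset_version=11,
    )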
| 25 | 0 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class a ( datasets.BeamBasedBuilder ):
"""simple docstring"""
def UpperCAmelCase ( self : Optional[int] ) -> str:
return datasets.DatasetInfo(
features=datasets.Features({"""content""": datasets.Value("""string""" )} ) , supervised_keys=SCREAMING_SNAKE_CASE__ , )
def UpperCAmelCase ( self : List[str] , __lowercase : Tuple , __lowercase : Tuple ) -> Any:
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_dummy_examples()} )]
def UpperCAmelCase ( self : Any , __lowercase : Tuple , __lowercase : Optional[int] ) -> Union[str, Any]:
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(SCREAMING_SNAKE_CASE__ )
class a ( datasets.BeamBasedBuilder ):
"""simple docstring"""
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
return datasets.DatasetInfo(
features=datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) , supervised_keys=SCREAMING_SNAKE_CASE__ , )
def UpperCAmelCase ( self : str , __lowercase : Dict , __lowercase : int ) -> Dict:
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_nested_examples()} )
]
def UpperCAmelCase ( self : Optional[int] , __lowercase : int , __lowercase : Tuple ) -> int:
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(SCREAMING_SNAKE_CASE__ )
def lowerCamelCase__ ( ):
return [(i, {"content": content}) for i, content in enumerate(["""foo""", """bar""", """foobar"""] )]
def lowerCamelCase__ ( ):
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["""foo""", """bar""", """foobar"""] )]
class a ( a__ ):
"""simple docstring"""
@require_beam
def UpperCAmelCase ( self : List[str] ) -> Dict:
__UpperCAmelCase : Union[str, Any] = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__UpperCAmelCase : Optional[Any] = DummyBeamDataset(cache_dir=SCREAMING_SNAKE_CASE__ , beam_runner="""DirectRunner""" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(SCREAMING_SNAKE_CASE__ , builder.name , """default""" , """0.0.0""" , f"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
__UpperCAmelCase : str = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , SCREAMING_SNAKE_CASE__ )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , SCREAMING_SNAKE_CASE__ )
self.assertDictEqual(dset["""train"""][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["""train"""][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(SCREAMING_SNAKE_CASE__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
@require_beam
def UpperCAmelCase ( self : Any ) -> Tuple:
import apache_beam as beam
__UpperCAmelCase : Optional[Any] = beam.io.parquetio.WriteToParquet
__UpperCAmelCase : int = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__UpperCAmelCase : Dict = DummyBeamDataset(cache_dir=SCREAMING_SNAKE_CASE__ , beam_runner="""DirectRunner""" )
with patch("""apache_beam.io.parquetio.WriteToParquet""" ) as write_parquet_mock:
__UpperCAmelCase : str = partial(SCREAMING_SNAKE_CASE__ , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
SCREAMING_SNAKE_CASE__ , builder.name , """default""" , """0.0.0""" , f"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
                        SCREAMING_SNAKE_CASE__ , builder.name , """default""" , """0.0.0""" , f"""{builder.name}-train-00001-of-00002.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
__UpperCAmelCase : Optional[int] = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , SCREAMING_SNAKE_CASE__ )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , SCREAMING_SNAKE_CASE__ )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["""train"""]["""content"""] ) , sorted(["""foo""", """bar""", """foobar"""] ) )
self.assertTrue(
os.path.exists(os.path.join(SCREAMING_SNAKE_CASE__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
@require_beam
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__UpperCAmelCase : Tuple = DummyBeamDataset(cache_dir=SCREAMING_SNAKE_CASE__ )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def UpperCAmelCase ( self : str ) -> List[Any]:
__UpperCAmelCase : List[str] = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__UpperCAmelCase : Dict = NestedBeamDataset(cache_dir=SCREAMING_SNAKE_CASE__ , beam_runner="""DirectRunner""" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(SCREAMING_SNAKE_CASE__ , builder.name , """default""" , """0.0.0""" , f"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) )
__UpperCAmelCase : Any = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , SCREAMING_SNAKE_CASE__ )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , SCREAMING_SNAKE_CASE__ )
self.assertDictEqual(dset["""train"""][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["""train"""][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(SCREAMING_SNAKE_CASE__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
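# Illustrative sketch of the Apache Beam pattern the builders above rely on:
# feeding in-memory examples into a pipeline with `beam.Create` and running it
# on the DirectRunner. The example data is hypothetical.
import apache_beam as beam
def run_example_pipeline() -> None:
    examples = [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]
    with beam.Pipeline(runner="DirectRunner") as pipeline:
        (
            pipeline
            | "Load Examples" >> beam.Create(examples)
            | "Keep Content" >> beam.Map(lambda kv: kv[1]["content"])
            | "Print" >> beam.Map(print)
        )
if __name__ == "__main__":
    run_example_pipeline()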
| 114 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : List[Any] = 384
SCREAMING_SNAKE_CASE__ : Tuple = 7
if "tiny" in model_name:
SCREAMING_SNAKE_CASE__ : int = 96
SCREAMING_SNAKE_CASE__ : str = (2, 2, 6, 2)
SCREAMING_SNAKE_CASE__ : List[Any] = (3, 6, 12, 24)
elif "small" in model_name:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 96
SCREAMING_SNAKE_CASE__ : Any = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE__ : Tuple = (3, 6, 12, 24)
elif "base" in model_name:
SCREAMING_SNAKE_CASE__ : Tuple = 128
SCREAMING_SNAKE_CASE__ : List[Any] = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE__ : int = (4, 8, 16, 32)
SCREAMING_SNAKE_CASE__ : Optional[int] = 12
SCREAMING_SNAKE_CASE__ : Optional[int] = 512
elif "large" in model_name:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 192
SCREAMING_SNAKE_CASE__ : int = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE__ : int = (6, 12, 24, 48)
SCREAMING_SNAKE_CASE__ : List[Any] = 12
SCREAMING_SNAKE_CASE__ : Optional[Any] = 768
# set label information
SCREAMING_SNAKE_CASE__ : Optional[Any] = 150
SCREAMING_SNAKE_CASE__ : Tuple = """huggingface/label-files"""
SCREAMING_SNAKE_CASE__ : List[str] = """ade20k-id2label.json"""
SCREAMING_SNAKE_CASE__ : str = json.load(open(hf_hub_download(_snake_case ,_snake_case ,repo_type="""dataset""" ) ,"""r""" ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {int(_snake_case ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : List[Any] = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : str = SwinConfig(
embed_dim=_snake_case ,depths=_snake_case ,num_heads=_snake_case ,window_size=_snake_case ,out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ,)
SCREAMING_SNAKE_CASE__ : int = UperNetConfig(
backbone_config=_snake_case ,auxiliary_in_channels=_snake_case ,num_labels=_snake_case ,idalabel=_snake_case ,labelaid=_snake_case ,)
return config
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = dct.pop(_snake_case )
SCREAMING_SNAKE_CASE__ : Tuple = val
def lowercase_ ( _snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : int = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ : List[Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ : Tuple = in_proj_weight[:dim, :]
SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[: dim]
SCREAMING_SNAKE_CASE__ : Optional[int] = in_proj_weight[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[
dim : dim * 2
]
SCREAMING_SNAKE_CASE__ : Tuple = in_proj_weight[
-dim :, :
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = in_proj_bias[-dim :]
# fmt: on
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = x.shape
SCREAMING_SNAKE_CASE__ : List[Any] = x.reshape(_snake_case ,4 ,in_channel // 4 )
SCREAMING_SNAKE_CASE__ : Dict = x[:, [0, 2, 1, 3], :].transpose(1 ,2 ).reshape(_snake_case ,_snake_case )
return x
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = x.shape
SCREAMING_SNAKE_CASE__ : Any = x.reshape(_snake_case ,in_channel // 4 ,4 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = x[:, :, [0, 2, 1, 3]].transpose(1 ,2 ).reshape(_snake_case ,_snake_case )
return x
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Tuple = x.shape[0]
SCREAMING_SNAKE_CASE__ : List[str] = x.reshape(4 ,in_channel // 4 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = x[[0, 2, 1, 3], :].transpose(0 ,1 ).reshape(_snake_case )
return x
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : int = x.shape[0]
SCREAMING_SNAKE_CASE__ : List[str] = x.reshape(in_channel // 4 ,4 )
SCREAMING_SNAKE_CASE__ : Tuple = x[:, [0, 2, 1, 3]].transpose(0 ,1 ).reshape(_snake_case )
return x
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : List[Any] = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
SCREAMING_SNAKE_CASE__ : Optional[int] = model_name_to_url[model_name]
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.hub.load_state_dict_from_url(_snake_case ,map_location="""cpu""" ,file_name=_snake_case )[
"""state_dict"""
]
for name, param in state_dict.items():
print(_snake_case ,param.shape )
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_upernet_config(_snake_case )
SCREAMING_SNAKE_CASE__ : List[str] = UperNetForSemanticSegmentation(_snake_case )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE__ : Optional[int] = state_dict.pop(_snake_case )
if "bn" in key:
SCREAMING_SNAKE_CASE__ : Optional[int] = key.replace("""bn""" ,"""batch_norm""" )
SCREAMING_SNAKE_CASE__ : Dict = val
# rename keys
SCREAMING_SNAKE_CASE__ : str = create_rename_keys(_snake_case )
for src, dest in rename_keys:
rename_key(_snake_case ,_snake_case ,_snake_case )
read_in_q_k_v(_snake_case ,config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = reverse_correct_unfold_reduction_order(_snake_case )
if "norm" in key:
SCREAMING_SNAKE_CASE__ : Tuple = reverse_correct_unfold_norm_order(_snake_case )
model.load_state_dict(_snake_case )
# verify on image
SCREAMING_SNAKE_CASE__ : List[str] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
SCREAMING_SNAKE_CASE__ : str = Image.open(requests.get(_snake_case ,stream=_snake_case ).raw ).convert("""RGB""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = SegformerImageProcessor()
SCREAMING_SNAKE_CASE__ : Optional[int] = processor(_snake_case ,return_tensors="""pt""" ).pixel_values
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Tuple = model(_snake_case )
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.logits
print(logits.shape )
print("""First values of logits:""" ,logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
elif model_name == "upernet-swin-small":
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(
[[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
elif model_name == "upernet-swin-base":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor(
[[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
elif model_name == "upernet-swin-large":
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor(
[[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
print("""Logits:""" ,outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] ,_snake_case ,atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_snake_case )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_snake_case )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
UpperCAmelCase__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-swin-tiny',
type=str,
choices=[f"""upernet-swin-{size}""" for size in ['tiny', 'small', 'base', 'large']],
help='Name of the Swin + UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
UpperCAmelCase__ : List[str] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
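# Illustrative sketch of the checkpoint key-renaming pattern used above: pop
# each tensor stored under the old name and reinsert it under the new one.
# Plain dictionaries with made-up keys stand in for real checkpoints.
def rename_state_dict_keys(state_dict, rename_pairs):
    renamed = dict(state_dict)
    for old_key, new_key in rename_pairs:
        renamed[new_key] = renamed.pop(old_key)
    return renamed
if __name__ == "__main__":
    toy_state = {"backbone.patch_embed.norm.weight": [1.0], "decode_head.conv_seg.bias": [0.0]}
    pairs = [
        ("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight"),
        ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
    ]
    print(rename_state_dict_keys(toy_state, pairs))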
| 25 | 0 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class lowerCamelCase__ :
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
UpperCAmelCase__ = namedtuple('''CoinsDistribResult''', '''moves excess''')
def UpperCAmelCase_ ( __snake_case ) -> Any:
"""simple docstring"""
if root is None:
return 0
# Validation
def count_nodes(__snake_case ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(__snake_case ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(_snake_case ) != count_coins(_snake_case ):
raise ValueError('''The nodes number should be same as the number of coins''' )
# Main calculation
def get_distrib(__snake_case ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
_lowercase =get_distrib(node.left )
_lowercase =get_distrib(node.right )
_lowercase =1 - left_distrib_excess
_lowercase =1 - right_distrib_excess
_lowercase =(
left_distrib_moves
+ right_distrib_moves
+ abs(_snake_case )
+ abs(_snake_case )
)
_lowercase =node.data - coins_to_left - coins_to_right
return CoinsDistribResult(_snake_case , _snake_case )
return get_distrib(_snake_case )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
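# Illustrative, self-contained sketch of the coin-distribution algorithm above:
# every node contributes |excess| moves per child subtree, where
# excess = coins in the subtree - number of nodes in the subtree.
# The node class and the sample tree are hypothetical.
from dataclasses import dataclass
@dataclass
class ExampleNode:
    data: int
    left: "ExampleNode" = None
    right: "ExampleNode" = None
def distribute_coins_example(root) -> int:
    moves = 0
    def excess(node) -> int:
        nonlocal moves
        if node is None:
            return 0
        left, right = excess(node.left), excess(node.right)
        moves += abs(left) + abs(right)
        return node.data + left + right - 1
    excess(root)
    return moves
if __name__ == "__main__":
    # The root holds 3 coins and both leaves hold 0, so two moves are needed.
    print(distribute_coins_example(ExampleNode(3, ExampleNode(0), ExampleNode(0))))  # 2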
| 5 |
"""simple docstring"""
import math
import unittest
def lowercase_ ( _snake_case ):
assert isinstance(_snake_case ,_snake_case ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 ,int(math.sqrt(_snake_case ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
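# Illustrative stand-alone use of the 6k +/- 1 trial-division check above, with
# a hypothetical helper that lists primes below a bound.
import math
def primes_below(limit: int) -> list:
    def _is_prime(n: int) -> bool:
        if n < 2:
            return False
        if n < 4:
            return True
        if n % 2 == 0 or n % 3 == 0:
            return False
        for i in range(5, int(math.sqrt(n)) + 1, 6):
            if n % i == 0 or n % (i + 2) == 0:
                return False
        return True
    return [n for n in range(2, limit) if _is_prime(n)]
if __name__ == "__main__":
    print(primes_below(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]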
| 25 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class UpperCamelCase ( a__ ):
lowercase = '''visual_bert'''
def __init__( self ,__UpperCamelCase=3_0522 ,__UpperCamelCase=768 ,__UpperCamelCase=512 ,__UpperCamelCase=12 ,__UpperCamelCase=12 ,__UpperCamelCase=3072 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=512 ,__UpperCamelCase=2 ,__UpperCamelCase=0.02 ,__UpperCamelCase=1e-12 ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=1 ,__UpperCamelCase=0 ,__UpperCamelCase=2 ,**__UpperCamelCase ,) -> Tuple:
'''simple docstring'''
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ ,bos_token_id=SCREAMING_SNAKE_CASE__ ,eos_token_id=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
lowercase_ : List[Any] = vocab_size
lowercase_ : List[str] = max_position_embeddings
lowercase_ : str = hidden_size
lowercase_ : Optional[Any] = visual_embedding_dim
lowercase_ : str = num_hidden_layers
lowercase_ : Union[str, Any] = num_attention_heads
lowercase_ : Optional[Any] = intermediate_size
lowercase_ : Union[str, Any] = hidden_act
lowercase_ : Optional[int] = hidden_dropout_prob
lowercase_ : int = attention_probs_dropout_prob
lowercase_ : str = initializer_range
lowercase_ : Optional[int] = type_vocab_size
lowercase_ : List[Any] = layer_norm_eps
lowercase_ : Optional[Any] = bypass_transformer
lowercase_ : List[str] = special_visual_initialize
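# Illustrative sketch of instantiating and round-tripping a configuration like
# the one above. `VisualBertConfig` is the upstream transformers class this
# sample mirrors; the tiny sizes are arbitrary demo values.
from transformers import VisualBertConfig
if __name__ == "__main__":
    tiny_config = VisualBertConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=2, intermediate_size=128)
    tiny_config.save_pretrained("./tiny-visual-bert-config")
    reloaded = VisualBertConfig.from_pretrained("./tiny-visual-bert-config")
    print(reloaded.hidden_size)  # 64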
| 213 |
"""simple docstring"""
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[int] = [1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = 0, 0, 0
SCREAMING_SNAKE_CASE__ : List[str] = ugly_nums[ia] * 2
SCREAMING_SNAKE_CASE__ : int = ugly_nums[ia] * 3
SCREAMING_SNAKE_CASE__ : Any = ugly_nums[ia] * 5
for _ in range(1 ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = min(_snake_case ,_snake_case ,_snake_case )
ugly_nums.append(_snake_case )
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE__ : Optional[int] = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE__ : List[str] = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE__ : Tuple = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f"""{ugly_numbers(2_0_0) = }""")
| 25 | 0 |
"""simple docstring"""
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
A = 'bert-base-cased'
A = 'fp16'
A = 'bf16'
A = [FPaa, BFaa]
@require_fsdp
@require_cuda
class __lowercase ( a__ ):
'''simple docstring'''
def _lowerCamelCase ( self ):
super().setUp()
__a : Optional[int] = dict(
ACCELERATE_USE_FSDP='''true''' , MASTER_ADDR='''localhost''' , MASTER_PORT='''10999''' , RANK='''0''' , LOCAL_RANK='''0''' , WORLD_SIZE='''1''' , )
def _lowerCamelCase ( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(SCREAMING_SNAKE_CASE__ ):
__a : Optional[Any] = self.dist_env.copy()
__a : Optional[Any] = f"""{i + 1}"""
__a : int = strategy
with mockenv_context(**SCREAMING_SNAKE_CASE__ ):
__a : Tuple = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def _lowerCamelCase ( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(SCREAMING_SNAKE_CASE__ ):
__a : List[Any] = self.dist_env.copy()
__a : Union[str, Any] = prefetch_policy
with mockenv_context(**SCREAMING_SNAKE_CASE__ ):
__a : int = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def _lowerCamelCase ( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(SCREAMING_SNAKE_CASE__ ):
__a : List[str] = self.dist_env.copy()
__a : List[str] = state_dict_type
with mockenv_context(**SCREAMING_SNAKE_CASE__ ):
__a : Union[str, Any] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def _lowerCamelCase ( self ):
__a : Any = AutoModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
for policy in FSDP_AUTO_WRAP_POLICY:
__a : int = self.dist_env.copy()
__a : Optional[int] = policy
if policy == "TRANSFORMER_BASED_WRAP":
__a : Union[str, Any] = """BertLayer"""
elif policy == "SIZE_BASED_WRAP":
__a : Union[str, Any] = """2000"""
with mockenv_context(**SCREAMING_SNAKE_CASE__ ):
__a : str = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(SCREAMING_SNAKE_CASE__ )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
__a : List[str] = self.dist_env.copy()
__a : str = """TRANSFORMER_BASED_WRAP"""
__a : List[Any] = """T5Layer"""
with mockenv_context(**SCREAMING_SNAKE_CASE__ ):
__a : Optional[int] = FullyShardedDataParallelPlugin()
with self.assertRaises(SCREAMING_SNAKE_CASE__ ) as cm:
fsdp_plugin.set_auto_wrap_policy(SCREAMING_SNAKE_CASE__ )
self.assertTrue('''Could not find the transformer layer class to wrap in the model.''' in str(cm.exception ) )
__a : Dict = self.dist_env.copy()
__a : List[Any] = """SIZE_BASED_WRAP"""
__a : int = """0"""
with mockenv_context(**SCREAMING_SNAKE_CASE__ ):
__a : str = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(SCREAMING_SNAKE_CASE__ )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def _lowerCamelCase ( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
__a : List[Any] = self.dist_env.copy()
__a : Optional[int] = mp_dtype
with mockenv_context(**SCREAMING_SNAKE_CASE__ ):
__a : Union[str, Any] = Accelerator()
if mp_dtype == "fp16":
__a : str = torch.floataa
elif mp_dtype == "bf16":
__a : int = torch.bfloataa
__a : List[Any] = MixedPrecision(param_dtype=SCREAMING_SNAKE_CASE__ , reduce_dtype=SCREAMING_SNAKE_CASE__ , buffer_dtype=SCREAMING_SNAKE_CASE__ )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , SCREAMING_SNAKE_CASE__ )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , SCREAMING_SNAKE_CASE__ ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(SCREAMING_SNAKE_CASE__ )
def _lowerCamelCase ( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
__a : Tuple = self.dist_env.copy()
__a : Any = str(SCREAMING_SNAKE_CASE__ ).lower()
with mockenv_context(**SCREAMING_SNAKE_CASE__ ):
__a : str = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=SCREAMING_SNAKE_CASE__ ) )
@require_fsdp
@require_multi_gpu
@slow
class __lowercase ( a__ ):
'''simple docstring'''
def _lowerCamelCase ( self ):
super().setUp()
__a : Optional[int] = 0.8_2
__a : Optional[int] = [
"""fsdp_shard_grad_op_transformer_based_wrap""",
"""fsdp_full_shard_transformer_based_wrap""",
]
__a : Dict = {
"""multi_gpu_fp16""": 3200,
"""fsdp_shard_grad_op_transformer_based_wrap_fp16""": 2000,
"""fsdp_full_shard_transformer_based_wrap_fp16""": 1900,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
__a : List[str] = 160
__a : Any = 160
__a : Optional[Any] = inspect.getfile(accelerate.test_utils )
__a : List[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps'''] )
def _lowerCamelCase ( self ):
__a : Optional[int] = os.path.join(self.test_scripts_folder , '''test_performance.py''' )
__a : Union[str, Any] = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""]
for config in self.performance_configs:
__a : Optional[int] = cmd.copy()
for i, strategy in enumerate(SCREAMING_SNAKE_CASE__ ):
if strategy.lower() in config:
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
break
if "fp32" in config:
cmd_config.append('''--mixed_precision=no''' )
else:
cmd_config.append('''--mixed_precision=fp16''' )
if "cpu_offload" in config:
cmd_config.append('''--fsdp_offload_params=True''' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('''--fsdp_min_num_params=2000''' )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
f"""--performance_lower_bound={self.performance_lower_bound}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(SCREAMING_SNAKE_CASE__ , env=os.environ.copy() )
def _lowerCamelCase ( self ):
__a : str = os.path.join(self.test_scripts_folder , '''test_checkpointing.py''' )
__a : List[str] = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
"""--use_fsdp""",
"""--mixed_precision=fp16""",
"""--fsdp_transformer_layer_cls_to_wrap=BertLayer""",
]
for i, strategy in enumerate(SCREAMING_SNAKE_CASE__ ):
__a : Union[str, Any] = cmd.copy()
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
if strategy != "FULL_SHARD":
continue
__a : Optional[int] = len(SCREAMING_SNAKE_CASE__ )
for state_dict_type in FSDP_STATE_DICT_TYPE:
__a : List[Any] = cmd_config[:state_dict_config_index]
cmd_config.append(f"""--fsdp_state_dict_type={state_dict_type}""" )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
'''--partial_train_epoch=1''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(SCREAMING_SNAKE_CASE__ , env=os.environ.copy() )
__a : Dict = cmd_config[:-1]
__a : Any = os.path.join(self.tmpdir , '''epoch_0''' )
cmd_config.extend(
[
f"""--resume_from_checkpoint={resume_from_checkpoint}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(SCREAMING_SNAKE_CASE__ , env=os.environ.copy() )
def _lowerCamelCase ( self ):
__a : str = os.path.join(self.test_scripts_folder , '''test_peak_memory_usage.py''' )
__a : int = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
__a : List[Any] = cmd.copy()
if "fp16" in spec:
cmd_config.extend(['''--mixed_precision=fp16'''] )
else:
cmd_config.extend(['''--mixed_precision=no'''] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(['''--use_fsdp'''] )
for i, strategy in enumerate(SCREAMING_SNAKE_CASE__ ):
if strategy.lower() in spec:
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
break
if "cpu_offload" in spec:
cmd_config.append('''--fsdp_offload_params=True''' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('''--fsdp_min_num_params=2000''' )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
f"""--peak_memory_upper_bound={peak_mem_upper_bound}""",
f"""--n_train={self.n_train}""",
f"""--n_val={self.n_val}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(SCREAMING_SNAKE_CASE__ , env=os.environ.copy() ) | 160 |
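# Illustrative sketch of the environment-variable driven FSDP configuration the
# tests above exercise: temporarily patching os.environ and reading the values
# back. The variable names are assumed to follow the pattern used in the tests.
import os
from unittest.mock import patch
EXAMPLE_FSDP_ENV = {
    "ACCELERATE_USE_FSDP": "true",
    "FSDP_SHARDING_STRATEGY": "1",
    "FSDP_AUTO_WRAP_POLICY": "TRANSFORMER_BASED_WRAP",
    "FSDP_BACKWARD_PREFETCH": "BACKWARD_PRE",
}
def read_fsdp_settings() -> dict:
    return {key: os.environ.get(key) for key in EXAMPLE_FSDP_ENV}
if __name__ == "__main__":
    with patch.dict(os.environ, EXAMPLE_FSDP_ENV):
        print(read_fsdp_settings())
    print(read_fsdp_settings())  # the patched values are gone once the context exits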
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : Dict = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = '''audio-spectrogram-transformer'''
def __init__(self , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=10_24 , SCREAMING_SNAKE_CASE__=1_28 , **SCREAMING_SNAKE_CASE__ , ) -> Tuple:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : str = num_hidden_layers
SCREAMING_SNAKE_CASE__ : int = num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE__ : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : int = initializer_range
SCREAMING_SNAKE_CASE__ : int = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Dict = patch_size
SCREAMING_SNAKE_CASE__ : Optional[int] = qkv_bias
SCREAMING_SNAKE_CASE__ : Optional[int] = frequency_stride
SCREAMING_SNAKE_CASE__ : Any = time_stride
SCREAMING_SNAKE_CASE__ : Optional[int] = max_length
SCREAMING_SNAKE_CASE__ : Any = num_mel_bins
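# Illustrative sketch, under the assumption that patches are cut from the
# spectrogram with a simple strided window: how the strides and sizes above
# determine the number of patches. The helper and the default-like values are
# for illustration only.
def count_spectrogram_patches(num_mel_bins: int, max_length: int, patch_size: int, frequency_stride: int, time_stride: int) -> int:
    frequency_out = (num_mel_bins - patch_size) // frequency_stride + 1
    time_out = (max_length - patch_size) // time_stride + 1
    return frequency_out * time_out
if __name__ == "__main__":
    # 128 mel bins, 1024 frames, 16x16 patches, stride 10 in both directions.
    print(count_spectrogram_patches(128, 1024, 16, 10, 10))  # 12 * 101 = 1212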
| 25 | 0 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a : Tuple = logging.get_logger(__name__)
a : List[Any] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
a : List[str] = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
a : str = {'facebook/blenderbot-3B': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowerCAmelCase_ ():
"""simple docstring"""
UpperCAmelCase_: Dict = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
UpperCAmelCase_: List[Any] = bs[:]
UpperCAmelCase_: Optional[int] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_snake_case )
cs.append(2**8 + n )
n += 1
UpperCAmelCase_: List[Any] = [chr(_snake_case ) for n in cs]
return dict(zip(_snake_case , _snake_case ) )
def lowerCAmelCase_ (lowerCAmelCase__: Tuple ):
"""simple docstring"""
UpperCAmelCase_: Union[str, Any] = set()
UpperCAmelCase_: Optional[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase_: Tuple = char
return pairs
class _a ( a__ ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = ['''input_ids''', '''attention_mask''']
def __init__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_="replace", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_="<mask>", SCREAMING_SNAKE_CASE_=False, **SCREAMING_SNAKE_CASE_, ) -> Union[str, Any]:
UpperCAmelCase_: List[str] = AddedToken(SCREAMING_SNAKE_CASE__, lstrip=SCREAMING_SNAKE_CASE__, rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) else bos_token
UpperCAmelCase_: Dict = AddedToken(SCREAMING_SNAKE_CASE__, lstrip=SCREAMING_SNAKE_CASE__, rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) else eos_token
UpperCAmelCase_: str = AddedToken(SCREAMING_SNAKE_CASE__, lstrip=SCREAMING_SNAKE_CASE__, rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) else sep_token
UpperCAmelCase_: Any = AddedToken(SCREAMING_SNAKE_CASE__, lstrip=SCREAMING_SNAKE_CASE__, rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) else cls_token
UpperCAmelCase_: Union[str, Any] = AddedToken(SCREAMING_SNAKE_CASE__, lstrip=SCREAMING_SNAKE_CASE__, rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) else unk_token
UpperCAmelCase_: Any = AddedToken(SCREAMING_SNAKE_CASE__, lstrip=SCREAMING_SNAKE_CASE__, rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase_: int = AddedToken(SCREAMING_SNAKE_CASE__, lstrip=SCREAMING_SNAKE_CASE__, rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) else mask_token
super().__init__(
errors=SCREAMING_SNAKE_CASE__, bos_token=SCREAMING_SNAKE_CASE__, eos_token=SCREAMING_SNAKE_CASE__, unk_token=SCREAMING_SNAKE_CASE__, sep_token=SCREAMING_SNAKE_CASE__, cls_token=SCREAMING_SNAKE_CASE__, pad_token=SCREAMING_SNAKE_CASE__, mask_token=SCREAMING_SNAKE_CASE__, add_prefix_space=SCREAMING_SNAKE_CASE__, **SCREAMING_SNAKE_CASE__, )
with open(SCREAMING_SNAKE_CASE__, encoding="""utf-8""" ) as vocab_handle:
UpperCAmelCase_: int = json.load(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_: Dict = {v: k for k, v in self.encoder.items()}
UpperCAmelCase_: List[Any] = errors # how to handle errors in decoding
UpperCAmelCase_: List[str] = bytes_to_unicode()
UpperCAmelCase_: List[str] = {v: k for k, v in self.byte_encoder.items()}
with open(SCREAMING_SNAKE_CASE__, encoding="""utf-8""" ) as merges_handle:
UpperCAmelCase_: Dict = merges_handle.read().split("""\n""" )[1:-1]
UpperCAmelCase_: Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges]
UpperCAmelCase_: Optional[Any] = dict(zip(SCREAMING_SNAKE_CASE__, range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
UpperCAmelCase_: Optional[int] = {}
UpperCAmelCase_: List[str] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCAmelCase_: Tuple = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def __snake_case (self ) -> int:
return len(self.encoder )
def __snake_case (self ) -> int:
return dict(self.encoder, **self.added_tokens_encoder )
def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> Tuple:
if token in self.cache:
return self.cache[token]
UpperCAmelCase_: int = tuple(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_: Union[str, Any] = get_pairs(SCREAMING_SNAKE_CASE__ )
if not pairs:
return token
while True:
UpperCAmelCase_: Optional[int] = min(SCREAMING_SNAKE_CASE__, key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE__, float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase_: List[Any] = bigram
UpperCAmelCase_: Any = []
UpperCAmelCase_: Optional[Any] = 0
while i < len(SCREAMING_SNAKE_CASE__ ):
try:
UpperCAmelCase_: Tuple = word.index(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase_: Dict = j
if word[i] == first and i < len(SCREAMING_SNAKE_CASE__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase_: str = tuple(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_: List[Any] = new_word
if len(SCREAMING_SNAKE_CASE__ ) == 1:
break
else:
UpperCAmelCase_: Tuple = get_pairs(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_: List[str] = """ """.join(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_: Union[str, Any] = word
return word
def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCAmelCase_: Dict = []
for token in re.findall(self.pat, SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase_: List[Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(SCREAMING_SNAKE_CASE__ ).split(""" """ ) )
return bpe_tokens
def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
return self.encoder.get(SCREAMING_SNAKE_CASE__, self.encoder.get(self.unk_token ) )
def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> str:
return self.decoder.get(SCREAMING_SNAKE_CASE__ )
def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
UpperCAmelCase_: Optional[Any] = """""".join(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_: Union[str, Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""", errors=self.errors )
return text
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
UpperCAmelCase_: List[Any] = os.path.join(
SCREAMING_SNAKE_CASE__, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase_: Optional[int] = os.path.join(
SCREAMING_SNAKE_CASE__, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(SCREAMING_SNAKE_CASE__, """w""", encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder, indent=2, sort_keys=SCREAMING_SNAKE_CASE__, ensure_ascii=SCREAMING_SNAKE_CASE__ ) + """\n""" )
UpperCAmelCase_: Union[str, Any] = 0
with open(SCREAMING_SNAKE_CASE__, """w""", encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda SCREAMING_SNAKE_CASE_ : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
UpperCAmelCase_: str = token_index
writer.write(""" """.join(SCREAMING_SNAKE_CASE__ ) + """\n""" )
index += 1
return vocab_file, merge_file
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__, token_ids_a=SCREAMING_SNAKE_CASE__, already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
UpperCAmelCase_: Optional[int] = [self.sep_token_id]
UpperCAmelCase_: List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=False, **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCAmelCase_: Union[str, Any] = kwargs.pop("""add_prefix_space""", self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(SCREAMING_SNAKE_CASE__ ) > 0 and not text[0].isspace()):
UpperCAmelCase_: List[Any] = """ """ + text
return (text, kwargs)
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> int:
return token_ids_a + [self.eos_token_id]
def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> List[int]:
UpperCAmelCase_: Tuple = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(""" """ + text )
else:
# Generated responses should contain them already.
inputs.append(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_: List[str] = """ """.join(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_: Dict = self.encode(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > self.model_max_length:
UpperCAmelCase_: Any = input_ids[-self.model_max_length :]
logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids
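# Illustrative sketch of the step at the heart of the byte-level BPE tokenizer
# above: collect adjacent symbol pairs, then repeatedly merge the
# highest-ranked (lowest-index) pair. The toy merge table is made up.
def get_symbol_pairs(word):
    return {(word[i], word[i + 1]) for i in range(len(word) - 1)}
def bpe_merge_once(word, ranks):
    candidates = [pair for pair in get_symbol_pairs(word) if pair in ranks]
    if not candidates:
        return word
    best = min(candidates, key=ranks.get)
    merged, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and (word[i], word[i + 1]) == best:
            merged.append(word[i] + word[i + 1])
            i += 2
        else:
            merged.append(word[i])
            i += 1
    return tuple(merged)
if __name__ == "__main__":
    toy_ranks = {("l", "o"): 0, ("lo", "w"): 1}
    word = tuple("low")
    word = bpe_merge_once(word, toy_ranks)  # ('lo', 'w')
    word = bpe_merge_once(word, toy_ranks)  # ('low',)
    print(word)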
| 147 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def lowercase_ ( _snake_case ):
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def lowercase_ ( _snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Any = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
SCREAMING_SNAKE_CASE__ : Optional[int] = key.replace("""heads.cmd.mim_head.cls.predictions""" ,"""mmm_image_head""" )
SCREAMING_SNAKE_CASE__ : Dict = key.replace("""heads.cmd.mlm_head.cls.predictions""" ,"""mmm_text_head""" )
SCREAMING_SNAKE_CASE__ : List[Any] = key.replace("""heads.cmd.itm_head.cls""" ,"""itm_head""" )
SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""heads.cmd.itm_head.pooler""" ,"""itm_head.pooler""" )
SCREAMING_SNAKE_CASE__ : int = key.replace("""heads.cmd.clip_head.logit_scale""" ,"""flava.logit_scale""" )
SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""heads.fairseq_mlm.cls.predictions""" ,"""mlm_head""" )
SCREAMING_SNAKE_CASE__ : str = key.replace("""heads.imagenet.mim_head.cls.predictions""" ,"""mim_head""" )
SCREAMING_SNAKE_CASE__ : List[str] = key.replace("""mm_text_projection""" ,"""flava.text_to_mm_projection""" )
SCREAMING_SNAKE_CASE__ : Dict = key.replace("""mm_image_projection""" ,"""flava.image_to_mm_projection""" )
SCREAMING_SNAKE_CASE__ : str = key.replace("""image_encoder.module""" ,"""flava.image_model""" )
SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""text_encoder.module""" ,"""flava.text_model""" )
SCREAMING_SNAKE_CASE__ : int = key.replace("""mm_encoder.module.encoder.cls_token""" ,"""flava.multimodal_model.cls_token""" )
SCREAMING_SNAKE_CASE__ : Dict = key.replace("""mm_encoder.module""" ,"""flava.multimodal_model""" )
SCREAMING_SNAKE_CASE__ : Any = key.replace("""text_projection""" ,"""flava.text_projection""" )
SCREAMING_SNAKE_CASE__ : List[Any] = key.replace("""image_projection""" ,"""flava.image_projection""" )
SCREAMING_SNAKE_CASE__ : Tuple = value.float()
for key, value in codebook_state_dict.items():
SCREAMING_SNAKE_CASE__ : Optional[Any] = value
return upgrade
@torch.no_grad()
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case=None ):
if config_path is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = FlavaConfig.from_pretrained(_snake_case )
else:
SCREAMING_SNAKE_CASE__ : List[str] = FlavaConfig()
SCREAMING_SNAKE_CASE__ : Optional[int] = FlavaForPreTraining(_snake_case ).eval()
SCREAMING_SNAKE_CASE__ : List[Any] = convert_dalle_checkpoint(_snake_case ,_snake_case ,save_checkpoint=_snake_case )
if os.path.exists(_snake_case ):
SCREAMING_SNAKE_CASE__ : List[str] = torch.load(_snake_case ,map_location="""cpu""" )
else:
SCREAMING_SNAKE_CASE__ : Tuple = torch.hub.load_state_dict_from_url(_snake_case ,map_location="""cpu""" )
SCREAMING_SNAKE_CASE__ : Dict = upgrade_state_dict(_snake_case ,_snake_case )
hf_model.load_state_dict(_snake_case )
SCREAMING_SNAKE_CASE__ : Any = hf_model.state_dict()
SCREAMING_SNAKE_CASE__ : Any = count_parameters(_snake_case )
SCREAMING_SNAKE_CASE__ : str = count_parameters(_snake_case ) + count_parameters(_snake_case )
assert torch.allclose(_snake_case ,_snake_case ,atol=1E-3 )
hf_model.save_pretrained(_snake_case )
if __name__ == "__main__":
UpperCAmelCase__ : List[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
UpperCAmelCase__ : Optional[int] = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
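# Example invocation (a sketch; the script name and file paths are assumed placeholders, only the
# argument names come from the parser above):
#   python convert_flava_original_checkpoint_to_hf.py --checkpoint_path flava_full.pt \
#       --codebook_path flava_codebook.pt --pytorch_dump_folder_path ./flava-converted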
| 25 | 0 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
UpperCAmelCase__ = logging.getLogger(__name__)
@dataclass
class a :
_snake_case : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
_snake_case : Optional[str] = field(
default=a__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
_snake_case : Optional[str] = field(
default='NER' , metadata={'help': 'Task type to fine tune in training (e.g. NER, POS, etc)'} )
_snake_case : Optional[str] = field(
default=a__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
_snake_case : bool = field(default=a__ , metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_snake_case : Optional[str] = field(
default=a__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class a :
_snake_case : str = field(
metadata={'help': 'The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'} )
_snake_case : Optional[str] = field(
default=a__ , metadata={'help': 'Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'} , )
_snake_case : int = field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
_snake_case : bool = field(
default=a__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def __UpperCAmelCase ( ):
"""simple docstring"""
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""" )
_UpperCAmelCase = import_module("""tasks""" )
try:
_UpperCAmelCase = getattr(_snake_case ,model_args.task_type )
_UpperCAmelCase = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1 ) ,training_args.fpaa ,)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" ,_snake_case )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
_UpperCAmelCase = token_classification_task.get_labels(data_args.labels )
_UpperCAmelCase = dict(enumerate(_snake_case ) )
_UpperCAmelCase = len(_snake_case )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=_snake_case ,idalabel=_snake_case ,labelaid={label: i for i, label in enumerate(_snake_case )} ,cache_dir=model_args.cache_dir ,)
_UpperCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast ,)
_UpperCAmelCase = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path ,from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) ,config=_snake_case ,cache_dir=model_args.cache_dir ,)
# Get datasets
_UpperCAmelCase = (
TokenClassificationDataset(
token_classification_task=_snake_case ,data_dir=data_args.data_dir ,tokenizer=_snake_case ,labels=_snake_case ,model_type=config.model_type ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.train ,)
if training_args.do_train
else None
)
_UpperCAmelCase = (
TokenClassificationDataset(
token_classification_task=_snake_case ,data_dir=data_args.data_dir ,tokenizer=_snake_case ,labels=_snake_case ,model_type=config.model_type ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.dev ,)
if training_args.do_eval
else None
)
def align_predictions(lowercase ,lowercase ) -> Tuple[List[int], List[int]]:
_UpperCAmelCase = np.argmax(_snake_case ,axis=2 )
_UpperCAmelCase = preds.shape
_UpperCAmelCase = [[] for _ in range(_snake_case )]
_UpperCAmelCase = [[] for _ in range(_snake_case )]
for i in range(_snake_case ):
for j in range(_snake_case ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(lowercase ) -> Dict:
_UpperCAmelCase = align_predictions(p.predictions ,p.label_ids )
return {
"accuracy_score": accuracy_score(_snake_case ,_snake_case ),
"precision": precision_score(_snake_case ,_snake_case ),
"recall": recall_score(_snake_case ,_snake_case ),
"f1": fa_score(_snake_case ,_snake_case ),
}
# Data collator
_UpperCAmelCase = DataCollatorWithPadding(_snake_case ,pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
_UpperCAmelCase = Trainer(
model=_snake_case ,args=_snake_case ,train_dataset=_snake_case ,eval_dataset=_snake_case ,compute_metrics=_snake_case ,data_collator=_snake_case ,)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_UpperCAmelCase = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_UpperCAmelCase = trainer.evaluate()
_UpperCAmelCase = os.path.join(training_args.output_dir ,"""eval_results.txt""" )
if trainer.is_world_process_zero():
with open(_snake_case ,"""w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(""" %s = %s""" ,_snake_case ,_snake_case )
writer.write("""%s = %s\n""" % (key, value) )
results.update(_snake_case )
# Predict
if training_args.do_predict:
_UpperCAmelCase = TokenClassificationDataset(
token_classification_task=_snake_case ,data_dir=data_args.data_dir ,tokenizer=_snake_case ,labels=_snake_case ,model_type=config.model_type ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.test ,)
_UpperCAmelCase = trainer.predict(_snake_case )
_UpperCAmelCase = align_predictions(_snake_case ,_snake_case )
_UpperCAmelCase = os.path.join(training_args.output_dir ,"""test_results.txt""" )
if trainer.is_world_process_zero():
with open(_snake_case ,"""w""" ) as writer:
for key, value in metrics.items():
logger.info(""" %s = %s""" ,_snake_case ,_snake_case )
writer.write("""%s = %s\n""" % (key, value) )
# Save predictions
_UpperCAmelCase = os.path.join(training_args.output_dir ,"""test_predictions.txt""" )
if trainer.is_world_process_zero():
with open(_snake_case ,"""w""" ) as writer:
with open(os.path.join(data_args.data_dir ,"""test.txt""" ) ,"""r""" ) as f:
token_classification_task.write_predictions_to_file(_snake_case ,_snake_case ,_snake_case )
return results
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
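# Example invocation (a sketch; the script name, paths and model identifier are assumed placeholders,
# only the argument names come from the dataclasses and TrainingArguments used above):
#   python run_ner.py --data_dir ./conll2003 --labels ./labels.txt --model_name_or_path bert-base-cased \
#       --output_dir ./ner-output --max_seq_length 128 --do_train --do_eval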
| 289 |
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = 'Hello world! cécé herlolip'
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : int = FairseqRobertaModel.from_pretrained(_snake_case )
roberta.eval() # disable dropout
SCREAMING_SNAKE_CASE__ : Any = roberta.model.encoder.sentence_encoder
SCREAMING_SNAKE_CASE__ : Any = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,hidden_size=roberta.cfg.model.encoder_embed_dim ,num_hidden_layers=roberta.cfg.model.encoder_layers ,num_attention_heads=roberta.cfg.model.encoder_attention_heads ,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1E-5 ,)
if classification_head:
SCREAMING_SNAKE_CASE__ : Dict = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our RoBERTa config:""" ,_snake_case )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = XLMRobertaXLForSequenceClassification(_snake_case ) if classification_head else XLMRobertaXLForMaskedLM(_snake_case )
model.eval()
# Now let's copy all the weights.
# Embeddings
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_sent_encoder.embed_tokens.weight
SCREAMING_SNAKE_CASE__ : int = roberta_sent_encoder.embed_positions.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_sent_encoder.layer_norm.weight
SCREAMING_SNAKE_CASE__ : Dict = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
SCREAMING_SNAKE_CASE__ : BertLayer = model.roberta.encoder.layer[i]
SCREAMING_SNAKE_CASE__ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
SCREAMING_SNAKE_CASE__ : RobertaAttention = layer.attention
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn_layer_norm.weight
SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.self_attn_layer_norm.bias
# self attention
SCREAMING_SNAKE_CASE__ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.q_proj.weight
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.self_attn.q_proj.bias
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.self_attn.k_proj.weight
SCREAMING_SNAKE_CASE__ : int = roberta_layer.self_attn.k_proj.bias
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.v_proj.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.v_proj.bias
# self-attention output
SCREAMING_SNAKE_CASE__ : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.out_proj.weight
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.final_layer_norm.weight
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_layer.final_layer_norm.bias
# intermediate
SCREAMING_SNAKE_CASE__ : BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.fca.weight
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.fca.bias
# output
SCREAMING_SNAKE_CASE__ : BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.fca.weight
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_layer.fca.bias
# end of layer
if classification_head:
SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.classification_heads["""mnli"""].dense.weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["""mnli"""].dense.bias
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["""mnli"""].out_proj.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
SCREAMING_SNAKE_CASE__ : str = roberta.model.encoder.lm_head.dense.weight
SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.encoder.lm_head.dense.bias
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.weight
SCREAMING_SNAKE_CASE__ : Dict = roberta.model.encoder.lm_head.layer_norm.bias
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta.model.encoder.lm_head.weight
SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
SCREAMING_SNAKE_CASE__ : torch.Tensor = roberta.encode(_snake_case ).unsqueeze(0 ) # batch of size 1
SCREAMING_SNAKE_CASE__ : Tuple = model(_snake_case )[0]
if classification_head:
SCREAMING_SNAKE_CASE__ : Dict = roberta.model.classification_heads["""mnli"""](roberta.extract_features(_snake_case ) )
else:
SCREAMING_SNAKE_CASE__ : Tuple = roberta.model(_snake_case )[0]
print(our_output.shape ,their_output.shape )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
SCREAMING_SNAKE_CASE__ : Tuple = torch.allclose(_snake_case ,_snake_case ,atol=1E-3 )
print("""Do both models output the same tensors?""" ,"""🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
pathlib.Path(_snake_case ).mkdir(parents=_snake_case ,exist_ok=_snake_case )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_snake_case )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
UpperCAmelCase__ : Any = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
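# Example invocation (a sketch; the script name and paths are assumed placeholders, only the argument
# names come from the parser above; add --classification_head to also port the MNLI head):
#   python convert_xlm_roberta_xl_checkpoint.py --roberta_checkpoint_path ./xlmr.xl.base \
#       --pytorch_dump_folder_path ./xlm-roberta-xl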
| 25 | 0 |
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[Any] = set()
# edges = list of graph's edges
_A : List[Any] = get_edges(_snake_case )
# While there are still elements in edges list, take an arbitrary edge
# (from_node, to_node) and add his extremity to chosen_vertices and then
# remove all arcs adjacent to the from_node and to_node
while edges:
_A : Optional[int] = edges.pop()
chosen_vertices.add(_snake_case )
chosen_vertices.add(_snake_case )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(_snake_case )
return chosen_vertices
def lowerCAmelCase_ ( snake_case_ ):
_A : Dict = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
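# A minimal, self-contained sketch of the same matching-based 2-approximation. The names below are
# my own (not from the source); it is included because the two functions above now share a single
# obfuscated name, so the cover function can no longer reach its edge-collecting helper as written.
def _demo_min_vertex_cover(graph: dict) -> set:
    edges = {(u, v) for u, neighbours in graph.items() for v in neighbours}
    cover: set = set()
    while edges:
        u, v = edges.pop()  # take an arbitrary remaining edge
        cover.update((u, v))
        edges = {e for e in edges if u not in e and v not in e}  # drop every edge touching u or v
    return cover

if __name__ == "__main__":
    # Output depends on set iteration order; any result is a valid cover of at most twice the optimum size.
    print(_demo_min_vertex_cover({0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}))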
| 26 |
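# Hedged note: the integer lists below read as precomputed reverse-diffusion timestep schedules
# (indices into a 1000-step noise schedule, listed from 999 down to 0) of increasing density; the
# file itself carries no docstrings, so the intended use of each variable is an assumption.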
"""simple docstring"""
UpperCAmelCase__ : List[str] = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
UpperCAmelCase__ : int = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
UpperCAmelCase__ : int = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
UpperCAmelCase__ : int = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
UpperCAmelCase__ : Tuple = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
UpperCAmelCase__ : Union[str, Any] = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
UpperCAmelCase__ : str = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
UpperCAmelCase__ : str = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
| 25 | 0 |
import math
import sys
def lowercase_ ( _A : Optional[int] ):
"""simple docstring"""
lowerCamelCase__ : List[str] = """"""
try:
with open(_snake_case , "rb" ) as binary_file:
lowerCamelCase__ : Tuple = binary_file.read()
for dat in data:
lowerCamelCase__ : Optional[int] = F"{dat:08b}"
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def lowercase_ ( _A : Union[str, Any] ):
"""simple docstring"""
lowerCamelCase__ : int = {"""0""": """0""", """1""": """1"""}
lowerCamelCase__ : Union[str, Any] = """""", """"""
lowerCamelCase__ : Any = len(_snake_case )
for i in range(len(_snake_case ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
lowerCamelCase__ : str = lexicon[curr_string]
result += last_match_id
lowerCamelCase__ : Union[str, Any] = last_match_id + """0"""
if math.loga(_snake_case ).is_integer():
lowerCamelCase__ : Optional[int] = {}
for curr_key in list(_snake_case ):
lowerCamelCase__ : Any = lexicon.pop(_snake_case )
lowerCamelCase__ : List[str] = new_lex
lowerCamelCase__ : Optional[Any] = last_match_id + """1"""
index += 1
lowerCamelCase__ : Dict = """"""
return result
def lowercase_ ( _A : Union[str, Any] , _A : int ):
"""simple docstring"""
lowerCamelCase__ : Tuple = 8
try:
with open(_snake_case , "wb" ) as opened_file:
lowerCamelCase__ : Union[str, Any] = [
to_write[i : i + byte_length]
for i in range(0 , len(_snake_case ) , _snake_case )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(_snake_case , 2 ).to_bytes(1 , byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def lowercase_ ( _A : Optional[int] ):
"""simple docstring"""
lowerCamelCase__ : str = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
lowerCamelCase__ : Optional[Any] = data_bits[counter:]
lowerCamelCase__ : Optional[Any] = data_bits[counter + 1 :]
return data_bits
def lowercase_ ( _A : List[str] , _A : List[Any] ):
"""simple docstring"""
lowerCamelCase__ : int = read_file_binary(_snake_case )
lowerCamelCase__ : List[Any] = remove_prefix(_snake_case )
lowerCamelCase__ : List[Any] = decompress_data(_snake_case )
write_file_binary(_snake_case , _snake_case )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
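# Example invocation (a sketch; the script and file names are assumed placeholders). The entry point
# above reads a compressed binary file, strips the size prefix, rebuilds the output bit-by-bit from
# the growing lexicon, and writes the result back out:
#   python lempel_ziv_decompress.py compressed.lz restored.bin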
| 184 |
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCAmelCase__ : List[str] = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
UpperCAmelCase__ : List[Any] = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : List[str] = SavedModel()
SCREAMING_SNAKE_CASE__ : Dict = []
with open(os.path.join(_snake_case ,"""utils""" ,"""tf_ops""" ,"""onnx.json""" ) ) as f:
SCREAMING_SNAKE_CASE__ : Any = json.load(_snake_case )["""opsets"""]
for i in range(1 ,opset + 1 ):
onnx_ops.extend(onnx_opsets[str(_snake_case )] )
with open(_snake_case ,"""rb""" ) as f:
saved_model.ParseFromString(f.read() )
SCREAMING_SNAKE_CASE__ : List[str] = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
SCREAMING_SNAKE_CASE__ : int = sorted(_snake_case )
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(_snake_case )
if strict and len(_snake_case ) > 0:
        raise Exception(f'''Found the following incompatible ops for the opset {opset}:\n''' + """\n""".join(incompatible_ops ) )
elif len(_snake_case ) > 0:
print(f'''Found the following incompatible ops for the opset {opset}:''' )
print(*_snake_case ,sep="""\n""" )
else:
print(f'''The saved model {saved_model_path} can properly be converted with ONNX.''' )
if __name__ == "__main__":
UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=1_2, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
UpperCAmelCase__ : Dict = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
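# Example invocation (a sketch; the saved-model path is an assumed placeholder, the flags come from
# the parser above):
#   python check_tf_ops.py --saved_model_path ./saved_model.pb --opset 12 --strict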
| 25 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def __UpperCAmelCase ( UpperCAmelCase_ : Tuple ) -> Optional[int]:
'''simple docstring'''
if not postfix_notation:
return 0
__snake_case : Optional[Any] = {"""+""", """-""", """*""", """/"""}
__snake_case : list[Any] = []
for token in postfix_notation:
if token in operations:
__snake_case : Optional[Any] = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(_snake_case ) )
return stack.pop()
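# Hedged reading of the division branch above: adding 1 to a // b when the operands have opposite
# signs and do not divide evenly emulates C-style truncation toward zero, e.g. -7 // 2 is -4 in
# Python, but the evaluator yields -3 for a postfix input equivalent to ["-7", "2", "/"].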
if __name__ == "__main__":
import doctest
doctest.testmod()
| 172 |
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
UpperCAmelCase__ : List[Any] = logging.getLogger()
def lowercase_ ( _snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """\n""".join(_snake_case )
Path(_snake_case ).open("""w""" ).writelines(_snake_case )
UpperCAmelCase__ : Union[str, Any] = 'patrickvonplaten/t5-tiny-random'
UpperCAmelCase__ : Optional[int] = 'sshleifer/bart-tiny-random'
UpperCAmelCase__ : Dict = 'sshleifer/tiny-mbart'
UpperCAmelCase__ : int = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
SCREAMING_SNAKE_CASE__ : List[Any] = input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
SCREAMING_SNAKE_CASE__ : str = [""" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."""]
_dump_articles(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = str(Path(self.get_auto_remove_tmp_dir() ) / """scores.json""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """translation_en_to_de""" if model == T5_TINY else """summarization"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = F'''
run_eval_search.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
'''.split()
with patch.object(SCREAMING_SNAKE_CASE__ , """argv""" , SCREAMING_SNAKE_CASE__ ):
run_generate()
assert Path(SCREAMING_SNAKE_CASE__ ).exists()
# os.remove(Path(output_file_name))
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
self.run_eval_tester(SCREAMING_SNAKE_CASE__ )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Dict:
"""simple docstring"""
self.run_eval_tester(SCREAMING_SNAKE_CASE__ )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
SCREAMING_SNAKE_CASE__ : int = input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
SCREAMING_SNAKE_CASE__ : Any = {
"""en""": ["""Machine learning is great, isn't it?""", """I like to eat bananas""", """Tomorrow is another great day!"""],
"""de""": [
"""Maschinelles Lernen ist großartig, oder?""",
"""Ich esse gerne Bananen""",
"""Morgen ist wieder ein toller Tag!""",
],
}
SCREAMING_SNAKE_CASE__ : List[str] = Path(self.get_auto_remove_tmp_dir() )
SCREAMING_SNAKE_CASE__ : Tuple = str(tmp_dir / """scores.json""" )
SCREAMING_SNAKE_CASE__ : Tuple = str(tmp_dir / """val.target""" )
_dump_articles(SCREAMING_SNAKE_CASE__ , text["""en"""] )
_dump_articles(SCREAMING_SNAKE_CASE__ , text["""de"""] )
SCREAMING_SNAKE_CASE__ : str = """translation_en_to_de""" if model == T5_TINY else """summarization"""
SCREAMING_SNAKE_CASE__ : List[Any] = F'''
run_eval_search.py
{model}
{str(SCREAMING_SNAKE_CASE__ )}
{str(SCREAMING_SNAKE_CASE__ )}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
'''.split()
testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""] )
with patch.object(SCREAMING_SNAKE_CASE__ , """argv""" , SCREAMING_SNAKE_CASE__ ):
with CaptureStdout() as cs:
run_search()
SCREAMING_SNAKE_CASE__ : Optional[Any] = [""" num_beams | length_penalty""", model, """Best score args"""]
SCREAMING_SNAKE_CASE__ : Any = ["""Info"""]
if "translation" in task:
expected_strings.append("""bleu""" )
else:
expected_strings.extend(SCREAMING_SNAKE_CASE__ )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(SCREAMING_SNAKE_CASE__ ).exists()
os.remove(Path(SCREAMING_SNAKE_CASE__ ) )
| 25 | 0 |
"""simple docstring"""
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class _UpperCAmelCase ( a__ ):
def lowerCamelCase ( self :Any ):
A = SMALL_MODEL_IDENTIFIER
A = """pt"""
A = """tf"""
def lowerCamelCase ( self :Dict , __UpperCamelCase :List[Any] ):
A = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(SCREAMING_SNAKE_CASE__ )
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Union[str, Any] ):
A = TFAutoModel.from_pretrained(self.test_model , from_pt=SCREAMING_SNAKE_CASE__ )
model_tf.save_pretrained(SCREAMING_SNAKE_CASE__ )
def lowerCamelCase ( self :Optional[Any] ):
A = """mock_framework"""
# Framework provided - return whatever the user provides
A = FeaturesManager.determine_framework(self.test_model , SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(SCREAMING_SNAKE_CASE__ )
A = FeaturesManager.determine_framework(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(SCREAMING_SNAKE_CASE__ )
A = FeaturesManager.determine_framework(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def lowerCamelCase ( self :List[Any] ):
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(SCREAMING_SNAKE_CASE__ )
A = FeaturesManager.determine_framework(SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(SCREAMING_SNAKE_CASE__ )
A = FeaturesManager.determine_framework(SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
A = FeaturesManager.determine_framework(SCREAMING_SNAKE_CASE__ )
def lowerCamelCase ( self :str ):
A = MagicMock(return_value=SCREAMING_SNAKE_CASE__ )
with patch("transformers.onnx.features.is_tf_available" , SCREAMING_SNAKE_CASE__ ):
A = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
A = MagicMock(return_value=SCREAMING_SNAKE_CASE__ )
with patch("transformers.onnx.features.is_torch_available" , SCREAMING_SNAKE_CASE__ ):
A = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.framework_tf )
# Both in environment -> use PyTorch
A = MagicMock(return_value=SCREAMING_SNAKE_CASE__ )
A = MagicMock(return_value=SCREAMING_SNAKE_CASE__ )
with patch("transformers.onnx.features.is_tf_available" , SCREAMING_SNAKE_CASE__ ), patch(
"transformers.onnx.features.is_torch_available" , SCREAMING_SNAKE_CASE__ ):
A = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.framework_pt )
# Both not in environment -> raise error
A = MagicMock(return_value=SCREAMING_SNAKE_CASE__ )
A = MagicMock(return_value=SCREAMING_SNAKE_CASE__ )
with patch("transformers.onnx.features.is_tf_available" , SCREAMING_SNAKE_CASE__ ), patch(
"transformers.onnx.features.is_torch_available" , SCREAMING_SNAKE_CASE__ ):
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
A = FeaturesManager.determine_framework(self.test_model )
| 292 |
"""simple docstring"""
UpperCAmelCase__ : Any = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
UpperCAmelCase__ : Any = [{'type': 'code', 'content': INSTALL_CONTENT}]
UpperCAmelCase__ : Optional[int] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 25 | 0 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def lowerCAmelCase_ ( A_):
if "model" in orig_key:
UpperCamelCase__: Union[str, Any] = orig_key.replace("model." ,"")
if "norm1" in orig_key:
UpperCamelCase__: str = orig_key.replace("norm1" ,"attention.output.LayerNorm")
if "norm2" in orig_key:
UpperCamelCase__: Optional[Any] = orig_key.replace("norm2" ,"output.LayerNorm")
if "norm" in orig_key:
UpperCamelCase__: Any = orig_key.replace("norm" ,"LayerNorm")
if "transformer" in orig_key:
UpperCamelCase__: str = orig_key.split(".")[0].split("_")[-1]
UpperCamelCase__: List[str] = orig_key.replace(F"transformer_{layer_num}" ,F"encoder.layer.{layer_num}")
if "mha.attn" in orig_key:
UpperCamelCase__: str = orig_key.replace("mha.attn" ,"attention.self")
if "mha" in orig_key:
UpperCamelCase__: Optional[int] = orig_key.replace("mha" ,"attention")
if "W_q" in orig_key:
UpperCamelCase__: Any = orig_key.replace("W_q" ,"self.query")
if "W_k" in orig_key:
UpperCamelCase__: str = orig_key.replace("W_k" ,"self.key")
if "W_v" in orig_key:
UpperCamelCase__: Tuple = orig_key.replace("W_v" ,"self.value")
if "ff1" in orig_key:
UpperCamelCase__: Optional[Any] = orig_key.replace("ff1" ,"intermediate.dense")
if "ff2" in orig_key:
UpperCamelCase__: Tuple = orig_key.replace("ff2" ,"output.dense")
if "ff" in orig_key:
UpperCamelCase__: Any = orig_key.replace("ff" ,"output.dense")
if "mlm_class" in orig_key:
UpperCamelCase__: List[str] = orig_key.replace("mlm.mlm_class" ,"cls.predictions.decoder")
if "mlm" in orig_key:
UpperCamelCase__: int = orig_key.replace("mlm" ,"cls.predictions.transform")
if "cls" not in orig_key:
UpperCamelCase__: str = """yoso.""" + orig_key
return orig_key
def lowerCAmelCase_ ( A_ ,A_):
for key in orig_state_dict.copy().keys():
UpperCamelCase__: str = orig_state_dict.pop(_snake_case)
if ("pooler" in key) or ("sen_class" in key):
continue
else:
UpperCamelCase__: int = val
UpperCamelCase__: Optional[int] = orig_state_dict["""cls.predictions.decoder.bias"""]
UpperCamelCase__: Tuple = torch.arange(_snake_case).expand((1, -1)) + 2
return orig_state_dict
def lowerCAmelCase_ ( A_ ,A_ ,A_):
UpperCamelCase__: Optional[int] = torch.load(_snake_case ,map_location="cpu")["""model_state_dict"""]
UpperCamelCase__: Dict = YosoConfig.from_json_file(_snake_case)
UpperCamelCase__: Optional[Any] = YosoForMaskedLM(_snake_case)
UpperCamelCase__: str = convert_checkpoint_helper(config.max_position_embeddings ,_snake_case)
print(model.load_state_dict(_snake_case))
model.eval()
model.save_pretrained(_snake_case)
print(F"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
A__: Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to YOSO pytorch checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for YOSO model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
A__: List[Any] = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
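# Example invocation (a sketch; file paths are assumed placeholders, the argument names come from
# the parser above):
#   python convert_yoso_checkpoint.py --pytorch_model_path ./yoso.bin \
#       --config_file ./yoso_config.json --pytorch_dump_path ./yoso-converted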
| 149 |
"""simple docstring"""
def lowercase_ ( _snake_case ):
    if _snake_case < 0:
        raise ValueError("""Input value must be a positive integer""" )
    elif isinstance(_snake_case ,float ):
raise TypeError("""Input value must be a 'int' type""" )
return bin(_snake_case ).count("""1""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
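# Hedged usage sketch (the value 25 is an assumed example, not from the source):
# bin(25) == '0b11001', so the function should report three set bits.
if __name__ == "__main__":
    print(lowercase_(25))  # expected output: 3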
| 25 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Optional[Any] = logging.get_logger(__name__)
a : Dict = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class a ( a__ ):
"""simple docstring"""
a : Optional[int] = '''audio-spectrogram-transformer'''
def __init__( self : Optional[int] , __lowercase : int=768 , __lowercase : Union[str, Any]=12 , __lowercase : Dict=12 , __lowercase : str=3072 , __lowercase : Optional[int]="gelu" , __lowercase : Tuple=0.0 , __lowercase : Any=0.0 , __lowercase : Union[str, Any]=0.02 , __lowercase : Dict=1e-1_2 , __lowercase : Optional[Any]=16 , __lowercase : Any=True , __lowercase : Tuple=10 , __lowercase : Union[str, Any]=10 , __lowercase : str=1024 , __lowercase : Tuple=128 , **__lowercase : int , ) -> Tuple:
super().__init__(**SCREAMING_SNAKE_CASE__ )
__UpperCAmelCase : Optional[Any] = hidden_size
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : int = num_attention_heads
__UpperCAmelCase : Tuple = intermediate_size
__UpperCAmelCase : Optional[int] = hidden_act
__UpperCAmelCase : Any = hidden_dropout_prob
__UpperCAmelCase : List[Any] = attention_probs_dropout_prob
__UpperCAmelCase : int = initializer_range
__UpperCAmelCase : int = layer_norm_eps
__UpperCAmelCase : Dict = patch_size
__UpperCAmelCase : Optional[int] = qkv_bias
__UpperCAmelCase : Optional[int] = frequency_stride
__UpperCAmelCase : Any = time_stride
__UpperCAmelCase : Optional[int] = max_length
__UpperCAmelCase : Any = num_mel_bins
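        # Hedged note: with the defaults above (patch_size=16, frequency_stride=10, time_stride=10,
        # max_length=1024, num_mel_bins=128), the model extracts 16x16 patches from a
        # 1024-frame x 128-mel log-spectrogram using a stride of 10 along both axes.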
| 114 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
UpperCAmelCase__ : List[str] = logging.get_logger(__name__)
@add_end_docstrings(a__ )
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
def __init__(self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
requires_backends(self , """vision""" )
self.check_model_type(SCREAMING_SNAKE_CASE__ )
def __call__(self , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
return super().__call__(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
return {}, {}, {}
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = load_image(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = image.size
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors=self.framework )
return model_inputs
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model(**SCREAMING_SNAKE_CASE__ )
return model_outputs
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = model_outputs.predicted_depth
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="""bicubic""" , align_corners=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = prediction.squeeze().cpu().numpy()
SCREAMING_SNAKE_CASE__ : Any = (output * 2_55 / np.max(SCREAMING_SNAKE_CASE__ )).astype("""uint8""" )
SCREAMING_SNAKE_CASE__ : List[str] = Image.fromarray(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = {}
SCREAMING_SNAKE_CASE__ : Any = predicted_depth
SCREAMING_SNAKE_CASE__ : Dict = depth
return output_dict
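# Hedged usage sketch (not runnable from this file because of the relative imports above; the
# checkpoint name and image URL are assumptions). The class is normally reached through the
# high-level factory:
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   result["depth"].save("depth.png")  # the PIL image assembled in postprocess() above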
| 25 | 0 |
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def UpperCAmelCase_ ( __snake_case ) -> Dict:
"""simple docstring"""
if hor == 128:
_lowercase =("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
_lowercase =(32, 128, 256)
_lowercase =("""UpResnetBlock1D""", """UpResnetBlock1D""")
elif hor == 32:
_lowercase =("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
_lowercase =(32, 64, 128, 256)
_lowercase =("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""")
_lowercase =torch.load(F"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch" )
_lowercase =model.state_dict()
_lowercase ={
"""down_block_types""": down_block_types,
"""block_out_channels""": block_out_channels,
"""up_block_types""": up_block_types,
"""layers_per_block""": 1,
"""use_timestep_embedding""": True,
"""out_block_type""": """OutConv1DBlock""",
"""norm_num_groups""": 8,
"""downsample_each_block""": False,
"""in_channels""": 14,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""sample_size""": 65536,
"""mid_block_type""": """MidResTemporalBlock1D""",
"""act_fn""": """mish""",
}
_lowercase =UNetaDModel(**_snake_case )
print(F"length of state dict: {len(state_dict.keys() )}" )
print(F"length of value function dict: {len(hf_value_function.state_dict().keys() )}" )
_lowercase =dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
_lowercase =state_dict.pop(_snake_case )
hf_value_function.load_state_dict(_snake_case )
torch.save(hf_value_function.state_dict() , F"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin" )
with open(F"hub/hopper-medium-v2/unet/hor{hor}/config.json" , '''w''' ) as f:
json.dump(_snake_case , _snake_case )
def UpperCAmelCase_ ( ) -> Any:
"""simple docstring"""
_lowercase ={
"""in_channels""": 14,
"""down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""),
"""up_block_types""": (),
"""out_block_type""": """ValueFunction""",
"""mid_block_type""": """ValueFunctionMidBlock1D""",
"""block_out_channels""": (32, 64, 128, 256),
"""layers_per_block""": 1,
"""downsample_each_block""": True,
"""sample_size""": 65536,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""use_timestep_embedding""": True,
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""norm_num_groups""": 8,
"""act_fn""": """mish""",
}
_lowercase =torch.load('''/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch''' )
_lowercase =model
_lowercase =UNetaDModel(**_snake_case )
print(F"length of state dict: {len(state_dict.keys() )}" )
print(F"length of value function dict: {len(hf_value_function.state_dict().keys() )}" )
_lowercase =dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
_lowercase =state_dict.pop(_snake_case )
hf_value_function.load_state_dict(_snake_case )
torch.save(hf_value_function.state_dict() , '''hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin''' )
with open('''hub/hopper-medium-v2/value_function/config.json''' , '''w''' ) as f:
json.dump(_snake_case , _snake_case )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 5 |
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase_ (a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = IFPipeline
__UpperCamelCase : Dict = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
__UpperCamelCase : Any = TEXT_TO_IMAGE_BATCH_PARAMS
__UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
return self._get_dummy_components()
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ) -> List[Any]:
"""simple docstring"""
if str(SCREAMING_SNAKE_CASE__ ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE__ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
SCREAMING_SNAKE_CASE__ : Any = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
self._test_save_load_local()
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : Dict = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""" )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
SCREAMING_SNAKE_CASE__ : List[str] = None
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
SCREAMING_SNAKE_CASE__ : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE__ : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
SCREAMING_SNAKE_CASE__ : Optional[Any] = IFInpaintingPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE__ : int = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : int = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__ : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
SCREAMING_SNAKE_CASE__ : Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : List[str] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
SCREAMING_SNAKE_CASE__ : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE__ : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Any = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : Dict = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE__ : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : List[Any] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE__ : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    def _test_if_inpainting(self , pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds ):
"""simple docstring"""
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[str] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : List[Any] = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__ : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE__ : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : Dict = output.images[0]
assert image.shape == (2_56, 2_56, 3)
SCREAMING_SNAKE_CASE__ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE__ : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _start_torch_memory_measurement( ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 25 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class UpperCamelCase ( TestCase ):
    def setUp( self ) -> None:
'''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
# DPR tok
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname ,'dpr_tokenizer' )
        os.makedirs(dpr_tokenizer_path ,exist_ok=True )
        self.vocab_file = os.path.join(dpr_tokenizer_path ,DPR_VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
# BART tok
lowercase_ : Any = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
lowercase_ : Optional[int] = dict(zip(SCREAMING_SNAKE_CASE__ ,range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
lowercase_ : Union[str, Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
lowercase_ : Tuple = {"""unk_token""": """<unk>"""}
        bart_tokenizer_path = os.path.join(self.tmpdirname ,'bart_tokenizer' )
        os.makedirs(bart_tokenizer_path ,exist_ok=True )
        self.vocab_file = os.path.join(bart_tokenizer_path ,BART_VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(bart_tokenizer_path ,BART_VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(SCREAMING_SNAKE_CASE__ ) )
    def get_dpr_tokenizer( self ) -> DPRQuestionEncoderTokenizer:
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname ,'dpr_tokenizer' ) )
    def get_bart_tokenizer( self ) -> BartTokenizer:
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname ,'bart_tokenizer' ) )
    def tearDown( self ) -> None:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Union[str, Any] = os.path.join(self.tmpdirname ,'rag_tokenizer' )
lowercase_ : Optional[Any] = RagConfig(question_encoder=DPRConfig().to_dict() ,generator=BartConfig().to_dict() )
lowercase_ : List[Any] = RagTokenizer(question_encoder=self.get_dpr_tokenizer() ,generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(SCREAMING_SNAKE_CASE__ )
rag_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowercase_ : Dict = RagTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ ,config=SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(new_rag_tokenizer.question_encoder ,SCREAMING_SNAKE_CASE__ )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() ,rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator ,SCREAMING_SNAKE_CASE__ )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() ,rag_tokenizer.generator.get_vocab() )
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Any = RagTokenizer.from_pretrained('facebook/rag-token-nq' )
lowercase_ : Optional[int] = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
lowercase_ : int = tokenizer(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Union[str, Any] = RagTokenizer.from_pretrained('facebook/rag-sequence-nq' )
lowercase_ : int = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
lowercase_ : Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
| 213 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
    def test_accelerated_optimizer_pickling(self ):
"""simple docstring"""
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
except Exception as e:
self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
| 25 | 0 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude: float , angle: float , radian_mode: bool = False) -> list[float]:
    # Resolve a force given as (magnitude, angle) into its x and y components.
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]
def in_static_equilibrium(forces: NDArray[float64] , location: NDArray[float64] , eps: float = 10**-1) -> bool:
    # The net moment is the sum of the cross products r x F; the system is in
    # static equilibrium when that sum is (approximately) zero.
    moments: NDArray[float64] = cross(location , forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
# Test to check if it works
A = array(
[
polar_force(718.4, 180 - 30),
polar_force(879.54, 45),
polar_force(100, -90),
]
)
A = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
A = array(
[
polar_force(30 * 9.81, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
A = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
A = array([[0, -2_000], [0, -1_200], [0, 15_600], [0, -12_400]])
A = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod() | 160 |
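    # Hedged extra example (added for illustration): a 9.81 N weight pulling straight
    # down resolves to roughly (0, -9.81) in Cartesian components.
    print(polar_force(9.81, -90))  # -> [~0.0, -9.81]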
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
UpperCAmelCase__ : Union[str, Any] = logging.getLogger(__name__)
def load_and_quantize_model( model ,bnb_quantization_config ,weights_location = None ,device_map = None ,no_split_module_classes = None ,max_memory = None ,offload_folder = None ,offload_state_dict = False ,):
SCREAMING_SNAKE_CASE__ : List[Any] = bnb_quantization_config.load_in_abit
SCREAMING_SNAKE_CASE__ : int = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
SCREAMING_SNAKE_CASE__ : int = []
# custom device map
if isinstance(_snake_case ,_snake_case ) and len(device_map.keys() ) > 1:
SCREAMING_SNAKE_CASE__ : Optional[int] = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
SCREAMING_SNAKE_CASE__ : int = get_keys_to_not_convert(_snake_case )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(_snake_case )
SCREAMING_SNAKE_CASE__ : List[Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
SCREAMING_SNAKE_CASE__ : Dict = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_snake_case )
# compatibility with peft
SCREAMING_SNAKE_CASE__ : Any = load_in_abit
SCREAMING_SNAKE_CASE__ : Any = load_in_abit
SCREAMING_SNAKE_CASE__ : Tuple = get_parameter_device(_snake_case )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
SCREAMING_SNAKE_CASE__ : int = replace_with_bnb_layers(_snake_case ,_snake_case ,modules_to_not_convert=_snake_case )
# convert param to the right dtype
SCREAMING_SNAKE_CASE__ : str = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
SCREAMING_SNAKE_CASE__ : Tuple = name.replace(""".weight""" ,"""""" ).replace(""".bias""" ,"""""" )
SCREAMING_SNAKE_CASE__ : Dict = getattr(_snake_case ,_snake_case ,_snake_case )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(_snake_case ):
param.to(_snake_case )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
f'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
SCREAMING_SNAKE_CASE__ : Dict = replace_with_bnb_layers(
_snake_case ,_snake_case ,modules_to_not_convert=_snake_case )
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_quantized_model_device_map(
_snake_case ,_snake_case ,_snake_case ,max_memory=_snake_case ,no_split_module_classes=_snake_case ,)
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
SCREAMING_SNAKE_CASE__ : Tuple = True
SCREAMING_SNAKE_CASE__ : Optional[Any] = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
_snake_case ,_snake_case ,_snake_case ,dtype=bnb_quantization_config.torch_dtype ,offload_folder=_snake_case ,offload_state_dict=_snake_case ,keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules ,offload_abit_bnb=load_in_abit and offload ,)
return dispatch_model(_snake_case ,device_map=_snake_case ,offload_dir=_snake_case )
def get_quantized_model_device_map( model ,bnb_quantization_config ,device_map=None ,max_memory=None ,no_split_module_classes=None ):
if device_map is None:
if torch.cuda.is_available():
SCREAMING_SNAKE_CASE__ : int = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(_snake_case ,_snake_case ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
SCREAMING_SNAKE_CASE__ : List[Any] = {}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = special_dtypes
SCREAMING_SNAKE_CASE__ : Optional[Any] = no_split_module_classes
SCREAMING_SNAKE_CASE__ : int = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
SCREAMING_SNAKE_CASE__ : int = get_balanced_memory(
_snake_case ,low_zero=(device_map == """balanced_low_0""") ,max_memory=_snake_case ,**_snake_case ,)
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_memory
SCREAMING_SNAKE_CASE__ : str = infer_auto_device_map(_snake_case ,**_snake_case )
if isinstance(_snake_case ,_snake_case ):
# check if don't have any quantized module on the cpu
SCREAMING_SNAKE_CASE__ : Tuple = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def replace_with_bnb_layers( model ,bnb_quantization_config ,modules_to_not_convert=None ,current_key_name=None ):
if modules_to_not_convert is None:
SCREAMING_SNAKE_CASE__ : Tuple = []
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = _replace_with_bnb_layers(
_snake_case ,_snake_case ,_snake_case ,_snake_case )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def _replace_with_bnb_layers( model ,bnb_quantization_config ,modules_to_not_convert=None ,current_key_name=None ,):
SCREAMING_SNAKE_CASE__ : Tuple = False
for name, module in model.named_children():
if current_key_name is None:
SCREAMING_SNAKE_CASE__ : Any = []
current_key_name.append(_snake_case )
if isinstance(_snake_case ,nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
SCREAMING_SNAKE_CASE__ : Tuple = """.""".join(_snake_case )
SCREAMING_SNAKE_CASE__ : List[str] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
SCREAMING_SNAKE_CASE__ : List[str] = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
SCREAMING_SNAKE_CASE__ : Tuple = bnb.nn.LinearabitLt(
module.in_features ,module.out_features ,module.bias is not None ,has_fpaa_weights=_snake_case ,threshold=bnb_quantization_config.llm_inta_threshold ,)
elif bnb_quantization_config.load_in_abit:
SCREAMING_SNAKE_CASE__ : Dict = bnb.nn.Linearabit(
module.in_features ,module.out_features ,module.bias is not None ,bnb_quantization_config.bnb_abit_compute_dtype ,compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant ,quant_type=bnb_quantization_config.bnb_abit_quant_type ,)
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
SCREAMING_SNAKE_CASE__ : str = module.weight.data
if module.bias is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = module.bias.data
bnb_module.requires_grad_(_snake_case )
setattr(_snake_case ,_snake_case ,_snake_case )
SCREAMING_SNAKE_CASE__ : List[str] = True
if len(list(module.children() ) ) > 0:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = _replace_with_bnb_layers(
_snake_case ,_snake_case ,_snake_case ,_snake_case )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def get_keys_to_not_convert( model ):
# Create a copy of the model
with init_empty_weights():
SCREAMING_SNAKE_CASE__ : Any = deepcopy(_snake_case ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
SCREAMING_SNAKE_CASE__ : Tuple = find_tied_parameters(_snake_case )
# For compatibility with Accelerate < 0.18
if isinstance(_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Tuple = sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() )
else:
SCREAMING_SNAKE_CASE__ : List[str] = sum(_snake_case ,[] )
SCREAMING_SNAKE_CASE__ : Dict = len(_snake_case ) > 0
# Check if it is a base model
SCREAMING_SNAKE_CASE__ : Optional[int] = False
if hasattr(_snake_case ,"""base_model_prefix""" ):
SCREAMING_SNAKE_CASE__ : Dict = not hasattr(_snake_case ,model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
SCREAMING_SNAKE_CASE__ : Optional[Any] = list(model.named_children() )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [list_modules[-1][0]]
# add last module together with tied weights
SCREAMING_SNAKE_CASE__ : List[str] = set(_snake_case ) - set(_snake_case )
SCREAMING_SNAKE_CASE__ : Tuple = list(set(_snake_case ) ) + list(_snake_case )
# remove ".weight" from the keys
SCREAMING_SNAKE_CASE__ : Tuple = [""".weight""", """.bias"""]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = name.replace(_snake_case ,"""""" )
filtered_module_names.append(_snake_case )
return filtered_module_names
def has_4bit_bnb_layers( model ):
for m in model.modules():
if isinstance(_snake_case ,bnb.nn.Linearabit ):
return True
return False
def get_parameter_device( parameter ):
return next(parameter.parameters() ).device
def quantize_and_offload( module ,param ,param_name ,new_dtype ,offload_folder ,offload_index ,fpaa_statistics ):
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(_snake_case ,_snake_case ,0 ,dtype=_snake_case ,value=_snake_case )
SCREAMING_SNAKE_CASE__ : str = param_name
SCREAMING_SNAKE_CASE__ : Dict = model
if "." in tensor_name:
SCREAMING_SNAKE_CASE__ : Any = tensor_name.split(""".""" )
for split in splits[:-1]:
SCREAMING_SNAKE_CASE__ : List[str] = getattr(_snake_case ,_snake_case )
if new_module is None:
raise ValueError(f'''{module} has no attribute {split}.''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = new_module
SCREAMING_SNAKE_CASE__ : List[Any] = splits[-1]
# offload weights
SCREAMING_SNAKE_CASE__ : List[Any] = False
offload_weight(module._parameters[tensor_name] ,_snake_case ,_snake_case ,index=_snake_case )
if hasattr(module._parameters[tensor_name] ,"""SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB ,param_name.replace("""weight""" ,"""SCB""" ) ,_snake_case ,index=_snake_case ,)
else:
offload_weight(_snake_case ,_snake_case ,_snake_case ,index=_snake_case )
offload_weight(_snake_case ,param_name.replace("""weight""" ,"""SCB""" ) ,_snake_case ,index=_snake_case )
set_module_tensor_to_device(_snake_case ,_snake_case ,"""meta""" ,dtype=_snake_case ,value=torch.empty(*param.size() ) )
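def _example_usage():
    # Hedged usage sketch (added for illustration, not part of the original module).
    # It assumes the helpers above behave like the accelerate originals they mirror,
    # and it needs a CUDA device plus a local checkpoint folder; the model name and
    # checkpoint path below are placeholders.
    from transformers import AutoConfig, AutoModelForCausalLM
    with init_empty_weights():
        empty_model = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained("facebook/opt-350m") )
    bnb_config = BnbQuantizationConfig(load_in_8bit=True , torch_dtype=torch.float16 )
    return load_and_quantize_model(
        empty_model ,
        bnb_config ,
        weights_location="path/to/opt-350m-checkpoint" ,
        device_map="auto" ,
    )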
| 25 | 0 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str , string2: str ):
"""simple docstring"""
UpperCAmelCase_: Optional[Any] = list(_snake_case )
UpperCAmelCase_: Union[str, Any] = list(_snake_case )
UpperCAmelCase_: List[Any] = 0
for i in range(len(_snake_case ) ):
if lista[i] != lista[i]:
count += 1
UpperCAmelCase_: Any = """_"""
if count > 1:
return False
else:
return "".join(_snake_case )
def check(binary: list ):
"""simple docstring"""
UpperCAmelCase_: Dict = []
while True:
UpperCAmelCase_: List[Any] = ["""$"""] * len(_snake_case )
UpperCAmelCase_: List[Any] = []
for i in range(len(_snake_case ) ):
for j in range(i + 1 , len(_snake_case ) ):
UpperCAmelCase_: Any = compare_string(binary[i] , binary[j] )
if k is False:
UpperCAmelCase_: Tuple = """*"""
UpperCAmelCase_: int = """*"""
temp.append("""X""" )
for i in range(len(_snake_case ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(_snake_case ) == 0:
return pi
UpperCAmelCase_: List[str] = list(set(_snake_case ) )
def decimal_to_binary(no_of_variable: int , minterms ):
"""simple docstring"""
UpperCAmelCase_: Tuple = []
for minterm in minterms:
UpperCAmelCase_: Optional[int] = """"""
for _ in range(_snake_case ):
UpperCAmelCase_: Union[str, Any] = str(minterm % 2 ) + string
minterm //= 2
temp.append(_snake_case )
return temp
def is_for_table(string1: str , string2: str , count: int ):
"""simple docstring"""
UpperCAmelCase_: str = list(_snake_case )
UpperCAmelCase_: Union[str, Any] = list(_snake_case )
UpperCAmelCase_: str = 0
for i in range(len(_snake_case ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def selection(chart , prime_implicants ):
"""simple docstring"""
UpperCAmelCase_: List[Any] = []
UpperCAmelCase_: List[str] = [0] * len(_snake_case )
for i in range(len(chart[0] ) ):
UpperCAmelCase_: Optional[int] = 0
UpperCAmelCase_: Optional[int] = -1
for j in range(len(_snake_case ) ):
if chart[j][i] == 1:
count += 1
UpperCAmelCase_: List[str] = j
if count == 1:
UpperCAmelCase_: Any = 1
for i in range(len(_snake_case ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(_snake_case ) ):
UpperCAmelCase_: List[Any] = 0
temp.append(prime_implicants[i] )
while True:
UpperCAmelCase_: Any = 0
UpperCAmelCase_: str = -1
UpperCAmelCase_: List[Any] = 0
for i in range(len(_snake_case ) ):
UpperCAmelCase_: str = chart[i].count(1 )
if count_n > max_n:
UpperCAmelCase_: Union[str, Any] = count_n
UpperCAmelCase_: str = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(_snake_case ) ):
UpperCAmelCase_: Dict = 0
def prime_implicant_chart(prime_implicants , binary ):
"""simple docstring"""
UpperCAmelCase_: Tuple = [[0 for x in range(len(_snake_case ) )] for x in range(len(_snake_case ) )]
for i in range(len(_snake_case ) ):
UpperCAmelCase_: Optional[int] = prime_implicants[i].count("""_""" )
for j in range(len(_snake_case ) ):
if is_for_table(prime_implicants[i] , binary[j] , _snake_case ):
UpperCAmelCase_: Tuple = 1
return chart
def main():
"""simple docstring"""
UpperCAmelCase_: Optional[Any] = int(input("""Enter the no. of variables\n""" ) )
UpperCAmelCase_: Tuple = [
float(_snake_case )
for x in input(
"""Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
]
UpperCAmelCase_: Union[str, Any] = decimal_to_binary(_snake_case , _snake_case )
UpperCAmelCase_: str = check(_snake_case )
print("""Prime Implicants are:""" )
print(_snake_case )
UpperCAmelCase_: Optional[Any] = prime_implicant_chart(_snake_case , _snake_case )
UpperCAmelCase_: Tuple = selection(_snake_case , _snake_case )
print("""Essential Prime Implicants are:""" )
print(_snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
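def _demo() -> None:
    # Hedged example (added for illustration; it assumes the helpers above behave
    # like the classic Quine-McCluskey reference implementation they mirror).
    # Minimise a 3-variable function with minterms {0, 1, 2, 5, 6, 7} without the
    # interactive prompts used by main().
    minterms = [0.0, 1.0, 2.0, 5.0, 6.0, 7.0]
    binary = decimal_to_binary(3, minterms)
    prime_implicants = check(binary)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential = selection(chart, prime_implicants)
    print("Prime Implicants:", prime_implicants)
    print("Essential Prime Implicants:", essential)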
| 147 |
"""simple docstring"""
def longest_common_substring( text1: str , text2: str ) -> str:
    if not (isinstance(text1 , str ) and isinstance(text2 , str )):
        raise ValueError("""longest_common_substring() takes two strings for inputs""" )
    text1_length = len(text1 )
    text2_length = len(text2 )
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1 )]
    ans_index = 0
    ans_length = 0
    for i in range(1 , text1_length + 1 ):
        for j in range(1 , text2_length + 1 ):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return text1[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
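    # Hedged extra check (added for illustration): "abcdxyz" and "xyzabcd" share the
    # run "abcd", which is the substring the function above returns.
    print(longest_common_substring("abcdxyz", "xyzabcd"))  # -> abcd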
| 25 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlm_roberta'] = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlm_roberta_fast'] = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlm_roberta'] = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlm_roberta'] = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_xlm_roberta'] = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 289 |
"""simple docstring"""
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
def normalize_box( box ,width ,height ):
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
def apply_tesseract( image ,lang ,tesseract_config = None ):
SCREAMING_SNAKE_CASE__ : Dict = tesseract_config if tesseract_config is not None else """"""
# apply OCR
SCREAMING_SNAKE_CASE__ : List[Any] = to_pil_image(_snake_case )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = pil_image.size
SCREAMING_SNAKE_CASE__ : Tuple = pytesseract.image_to_data(_snake_case ,lang=_snake_case ,output_type="""dict""" ,config=_snake_case )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]
# filter empty words and corresponding coordinates
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [idx for idx, word in enumerate(_snake_case ) if not word.strip()]
SCREAMING_SNAKE_CASE__ : Dict = [word for idx, word in enumerate(_snake_case ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : List[str] = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : Tuple = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : int = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : Tuple = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
SCREAMING_SNAKE_CASE__ : List[Any] = []
for x, y, w, h in zip(_snake_case ,_snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = [x, y, x + w, y + h]
actual_boxes.append(_snake_case )
# finally, normalize the bounding boxes
SCREAMING_SNAKE_CASE__ : List[str] = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(_snake_case ,_snake_case ,_snake_case ) )
assert len(_snake_case ) == len(_snake_case ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class lowerCAmelCase_ (BaseImageProcessor ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = ['''pixel_values''']
def __init__(self , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "" , **SCREAMING_SNAKE_CASE__ , ) -> None:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = size if size is not None else {"""height""": 2_24, """width""": 2_24}
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = do_resize
SCREAMING_SNAKE_CASE__ : Any = size
SCREAMING_SNAKE_CASE__ : List[Any] = resample
SCREAMING_SNAKE_CASE__ : Dict = apply_ocr
SCREAMING_SNAKE_CASE__ : List[str] = ocr_lang
SCREAMING_SNAKE_CASE__ : Tuple = tesseract_config
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = get_size_dict(SCREAMING_SNAKE_CASE__ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
SCREAMING_SNAKE_CASE__ : Any = (size["""height"""], size["""width"""])
return resize(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ , ) -> PIL.Image.Image:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : Optional[Any] = apply_ocr if apply_ocr is not None else self.apply_ocr
SCREAMING_SNAKE_CASE__ : Optional[Any] = ocr_lang if ocr_lang is not None else self.ocr_lang
SCREAMING_SNAKE_CASE__ : Dict = tesseract_config if tesseract_config is not None else self.tesseract_config
SCREAMING_SNAKE_CASE__ : Optional[int] = make_list_of_images(SCREAMING_SNAKE_CASE__ )
if not valid_images(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for image in images]
if apply_ocr:
requires_backends(self , """pytesseract""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
SCREAMING_SNAKE_CASE__ : Dict = []
for image in images:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = apply_tesseract(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
words_batch.append(SCREAMING_SNAKE_CASE__ )
boxes_batch.append(SCREAMING_SNAKE_CASE__ )
if do_resize:
SCREAMING_SNAKE_CASE__ : Optional[int] = [self.resize(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [flip_channel_order(SCREAMING_SNAKE_CASE__ ) for image in images]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images]
SCREAMING_SNAKE_CASE__ : Optional[Any] = BatchFeature(data={"""pixel_values""": images} , tensor_type=SCREAMING_SNAKE_CASE__ )
if apply_ocr:
SCREAMING_SNAKE_CASE__ : List[Any] = words_batch
SCREAMING_SNAKE_CASE__ : List[str] = boxes_batch
return data
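def _example_usage() -> None:
    # Hedged usage sketch (added for illustration; it assumes the processor above
    # behaves like the LayoutLM-style image processors it mirrors). `apply_ocr=False`
    # keeps the example free of the pytesseract dependency.
    from PIL import Image
    processor = lowerCAmelCase_(apply_ocr=False )
    image = Image.fromarray(np.zeros((40, 60, 3) , dtype=np.uint8 ) )
    encoding = processor(image , return_tensors="""np""" )
    print(encoding["""pixel_values"""].shape )  # expected: (1, 3, 224, 224)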
| 25 | 0 |
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowercase ( PipelineTesterMixin,IFPipelineTesterMixin,unittest.TestCase ):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {'''latents'''}
    def get_dummy_components( self ):
return self._get_dummy_components()
    def get_dummy_inputs( self , device , seed=0 ):
if str(SCREAMING_SNAKE_CASE__ ).startswith("""mps""" ):
_A : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
_A : Any = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
_A : int = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def a__ ( self ) -> Tuple:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def a__ ( self ) -> List[str]:
super().test_save_load_floataa(expected_max_diff=1e-1 )
def a__ ( self ) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def a__ ( self ) -> Tuple:
self._test_save_load_local()
def a__ ( self ) -> Dict:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def a__ ( self ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
    def tearDown( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_all( self ):
_A : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa )
_A : Dict = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""" )
_A : Union[str, Any] = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_A : List[str] = None
_A : Union[str, Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_A : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components )
_A : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_A : Optional[Any] = IFInpaintingPipeline(**pipe_a.components )
_A : int = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    def _test_if( self , pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds ):
_start_torch_memory_measurement()
_A : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A : Dict = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , )
_A : int = output.images[0]
assert image.shape == (64, 64, 3)
_A : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_A : Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
_A : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
_A : Optional[Any] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , )
_A : List[str] = output.images[0]
assert image.shape == (256, 256, 3)
_A : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_A : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    def _test_if_imgaimg( self , pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds ):
_start_torch_memory_measurement()
_A : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
_A : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A : Any = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , )
_A : Dict = output.images[0]
assert image.shape == (64, 64, 3)
_A : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_A : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
_A : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A : List[Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
_A : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
_A : Optional[Any] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , )
_A : List[Any] = output.images[0]
assert image.shape == (256, 256, 3)
_A : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_A : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    def _test_if_inpainting( self , pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds ):
_start_torch_memory_measurement()
_A : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
_A : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ )
_A : Dict = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A : List[str] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , )
_A : List[Any] = output.images[0]
assert image.shape == (64, 64, 3)
_A : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_A : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
_A : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
_A : int = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
_A : int = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ )
_A : int = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , )
_A : Dict = output.images[0]
assert image.shape == (256, 256, 3)
_A : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_A : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _start_torch_memory_measurement( ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 26 |
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class lowerCAmelCase_ :
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = list(poly_a or [0] )[:]
SCREAMING_SNAKE_CASE__ : Tuple = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
SCREAMING_SNAKE_CASE__ : int = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
SCREAMING_SNAKE_CASE__ : List[str] = len(self.polyB )
# Add 0 to make lengths equal a power of 2
SCREAMING_SNAKE_CASE__ : Optional[int] = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
SCREAMING_SNAKE_CASE__ : List[str] = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
SCREAMING_SNAKE_CASE__ : Tuple = self.__multiply()
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB]
# Corner case
if len(SCREAMING_SNAKE_CASE__ ) <= 1:
return dft[0]
#
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.c_max_length // 2
while next_ncol > 0:
SCREAMING_SNAKE_CASE__ : Any = [[] for i in range(SCREAMING_SNAKE_CASE__ )]
SCREAMING_SNAKE_CASE__ : Tuple = self.root**next_ncol
# First half of next step
SCREAMING_SNAKE_CASE__ : str = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(SCREAMING_SNAKE_CASE__ ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
SCREAMING_SNAKE_CASE__ : int = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(SCREAMING_SNAKE_CASE__ ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
SCREAMING_SNAKE_CASE__ : Union[str, Any] = new_dft
SCREAMING_SNAKE_CASE__ : Tuple = next_ncol // 2
return dft[0]
def __magic_name__ (self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.__dft("""A""" )
SCREAMING_SNAKE_CASE__ : Dict = self.__dft("""B""" )
SCREAMING_SNAKE_CASE__ : List[Any] = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
SCREAMING_SNAKE_CASE__ : Optional[Any] = 2
while next_ncol <= self.c_max_length:
SCREAMING_SNAKE_CASE__ : List[str] = [[] for i in range(SCREAMING_SNAKE_CASE__ )]
SCREAMING_SNAKE_CASE__ : Tuple = self.root ** (next_ncol // 2)
SCREAMING_SNAKE_CASE__ : Any = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
SCREAMING_SNAKE_CASE__ : Optional[Any] = new_inverse_c
next_ncol *= 2
# Unpack
SCREAMING_SNAKE_CASE__ : Optional[Any] = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__(self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = """A = """ + """ + """.join(
F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """B = """ + """ + """.join(
F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) )
SCREAMING_SNAKE_CASE__ : int = """A*B = """ + """ + """.join(
F'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) )
return F'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
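# Hedged usage sketch (illustration only; it assumes the DFT internals above behave
# like the reference implementation they mirror): lowerCAmelCase_([1, 2, 3], [4, 5])
# represents (1 + 2x + 3x^2) * (4 + 5x), so its `product` attribute should hold the
# coefficients [4, 13, 22, 15].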
| 25 | 0 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
A : str = 'http://www.mocksite.com/file1.txt'
A : List[str] = '"text": ["foo", "foo"]'
A : List[Any] = '6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8'
class _lowercase :
"""simple docstring"""
A__ = 2_00
A__ = {'''Content-Length''': '''100'''}
A__ = {}
def lowerCAmelCase ( self : Dict , **__lowerCamelCase : List[Any] ):
'''simple docstring'''
        return [bytes(CONTENT , "utf-8" )]
def lowercase_ ( *_A : Dict , **_A : Optional[Any] ):
"""simple docstring"""
return MockResponse()
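# The test below monkeypatches requests.request so that every URL resolves to MockResponse;
# the DownloadManager is then expected to store the payload inside the "downloads" cache
# directory under a filename derived from hashing the URL, together with a ".json" metadata
# sidecar, which is what the assertions on parts[-1], parts[-2] and the metadata file verify.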
@pytest.mark.parametrize("urls_type" , [str, list, dict] )
def lowercase_ ( _A : Union[str, Any] , _A : int , _A : str ):
"""simple docstring"""
import requests
monkeypatch.setattr(_snake_case , "request" , _snake_case )
lowerCamelCase__ : str = URL
if issubclass(_snake_case , _snake_case ):
lowerCamelCase__ : Optional[int] = url
elif issubclass(_snake_case , _snake_case ):
lowerCamelCase__ : Union[str, Any] = [url]
elif issubclass(_snake_case , _snake_case ):
lowerCamelCase__ : int = {"""train""": url}
lowerCamelCase__ : Tuple = """dummy"""
lowerCamelCase__ : Dict = """downloads"""
lowerCamelCase__ : List[Any] = tmp_path
lowerCamelCase__ : Tuple = DownloadConfig(
cache_dir=os.path.join(_snake_case , _snake_case ) , use_etag=_snake_case , )
lowerCamelCase__ : Any = DownloadManager(dataset_name=_snake_case , download_config=_snake_case )
lowerCamelCase__ : Tuple = dl_manager.download(_snake_case )
lowerCamelCase__ : Optional[Any] = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(_snake_case , _snake_case ):
lowerCamelCase__ : Optional[int] = [downloaded_paths]
lowerCamelCase__ : Union[str, Any] = [urls]
elif isinstance(_snake_case , _snake_case ):
assert "train" in downloaded_paths.keys()
lowerCamelCase__ : Dict = downloaded_paths.values()
lowerCamelCase__ : Optional[Any] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(_snake_case , _snake_case ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
lowerCamelCase__ : Dict = Path(_snake_case )
lowerCamelCase__ : int = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
lowerCamelCase__ : List[str] = downloaded_path.read_text()
assert content == CONTENT
lowerCamelCase__ : Optional[int] = downloaded_path.with_suffix(".json" )
assert metadata_downloaded_path.exists()
lowerCamelCase__ : Any = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type" , [str, list, dict] )
def lowercase_ ( _A : Optional[int] , _A : int , _A : Dict ):
"""simple docstring"""
lowerCamelCase__ : Dict = str(_snake_case )
if issubclass(_snake_case , _snake_case ):
lowerCamelCase__ : Optional[int] = filename
elif issubclass(_snake_case , _snake_case ):
lowerCamelCase__ : int = [filename]
elif issubclass(_snake_case , _snake_case ):
lowerCamelCase__ : Union[str, Any] = {"""train""": filename}
lowerCamelCase__ : List[Any] = """dummy"""
lowerCamelCase__ : List[str] = xz_file.parent
lowerCamelCase__ : Dict = """extracted"""
lowerCamelCase__ : Optional[int] = DownloadConfig(
cache_dir=_snake_case , use_etag=_snake_case , )
lowerCamelCase__ : Tuple = DownloadManager(dataset_name=_snake_case , download_config=_snake_case )
lowerCamelCase__ : Dict = dl_manager.extract(_snake_case )
lowerCamelCase__ : List[Any] = paths
for extracted_paths in [extracted_paths]:
if isinstance(_snake_case , _snake_case ):
lowerCamelCase__ : int = [extracted_paths]
lowerCamelCase__ : Optional[Any] = [paths]
elif isinstance(_snake_case , _snake_case ):
assert "train" in extracted_paths.keys()
lowerCamelCase__ : str = extracted_paths.values()
lowerCamelCase__ : Union[str, Any] = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(_snake_case , _snake_case ):
assert extracted_path == dl_manager.extracted_paths[input_path]
lowerCamelCase__ : Tuple = Path(_snake_case )
lowerCamelCase__ : List[Any] = extracted_path.parts
assert parts[-1] == hash_url_to_filename(_snake_case , etag=_snake_case )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
lowerCamelCase__ : Union[str, Any] = extracted_path.read_text()
lowerCamelCase__ : Union[str, Any] = text_file.read_text()
assert extracted_file_content == expected_file_content
def lowercase_ ( _A : Union[str, Any] , _A : int ):
"""simple docstring"""
assert path.endswith(".jsonl" )
for num_items, line in enumerate(_snake_case , start=1 ):
lowerCamelCase__ : List[str] = json.loads(line.decode("utf-8" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("archive_jsonl" , ["tar_jsonl_path", "zip_jsonl_path"] )
def lowercase_ ( _A : List[str] , _A : Any ):
"""simple docstring"""
lowerCamelCase__ : str = request.getfixturevalue(_snake_case )
lowerCamelCase__ : Union[str, Any] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(_snake_case ) , start=1 ):
_test_jsonl(_snake_case , _snake_case )
assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl" , ["tar_nested_jsonl_path", "zip_nested_jsonl_path"] )
def lowercase_ ( _A : Tuple , _A : str ):
"""simple docstring"""
lowerCamelCase__ : Dict = request.getfixturevalue(_snake_case )
lowerCamelCase__ : Optional[Any] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(_snake_case ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(_snake_case ) , start=1 ):
_test_jsonl(_snake_case , _snake_case )
assert num_tar == 1
assert num_jsonl == 2
def lowercase_ ( _A : Optional[int] ):
"""simple docstring"""
lowerCamelCase__ : List[str] = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(_snake_case ) , start=1 ):
assert os.path.basename(_snake_case ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 184 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch """
"""helper utility that will spawn up """
"""multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" ,type=_snake_case ,default=1 ,help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" ,type=_snake_case ,help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) ,)
# rest from the training program
parser.add_argument("""training_script_args""" ,nargs=_snake_case )
return parser.parse_args()
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : int = parse_args()
# Import training_script as a module.
SCREAMING_SNAKE_CASE__ : Dict = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
SCREAMING_SNAKE_CASE__ : int = script_fpath.stem
SCREAMING_SNAKE_CASE__ : Optional[Any] = importlib.import_module(_snake_case )
# Patch sys.argv
SCREAMING_SNAKE_CASE__ : str = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores )
if __name__ == "__main__":
main()
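# Illustrative invocation (the launcher file name is an assumption; the flags come from the
# parser defined above):
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...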
| 25 | 0 |
"""simple docstring"""
def __UpperCAmelCase ( a : int ) -> int:
    '''simple docstring'''
    if a < 0:
        raise ValueError('Input value must be a positive integer' )
    elif isinstance(a , float ):
        raise TypeError('Input value must be a \'int\' type' )
    return bin(a ).count('1' )
if __name__ == "__main__":
import doctest
doctest.testmod()
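# Worked example: bin(25) == '0b11001', which contains three '1' characters,
# so an input of 25 yields 3.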
| 172 |
"""simple docstring"""
def lowercase_ ( _snake_case ,_snake_case ):
return 1 if input_a == input_a else 0
def lowercase_ ( ):
assert xnor_gate(0 ,0 ) == 1
assert xnor_gate(0 ,1 ) == 0
assert xnor_gate(1 ,0 ) == 0
assert xnor_gate(1 ,1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
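# XNOR is the complement of XOR: the output is 1 exactly when both inputs agree,
# which is why the implementation above reduces to a simple equality test.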
| 25 | 0 |
"""simple docstring"""
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
_snake_case : Dict = logging.get_logger(__name__)
_snake_case : List[Any] = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
_snake_case : List[Any] = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
_snake_case : Union[str, Any] = {
'jukebox': 512,
}
class _UpperCAmelCase ( a__ ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_LYRIC_TOKENS_SIZES
UpperCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self :int , __UpperCamelCase :str , __UpperCamelCase :Optional[int] , __UpperCamelCase :Tuple , __UpperCamelCase :Tuple=["v3", "v2", "v2"] , __UpperCamelCase :List[str]=5_12 , __UpperCamelCase :List[str]=5 , __UpperCamelCase :Union[str, Any]="<|endoftext|>" , **__UpperCamelCase :Tuple , ):
A = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else unk_token
super().__init__(
unk_token=SCREAMING_SNAKE_CASE__ , n_genres=SCREAMING_SNAKE_CASE__ , version=SCREAMING_SNAKE_CASE__ , max_n_lyric_tokens=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
A = version
A = max_n_lyric_tokens
A = n_genres
with open(SCREAMING_SNAKE_CASE__ , encoding="utf-8" ) as vocab_handle:
A = json.load(SCREAMING_SNAKE_CASE__ )
with open(SCREAMING_SNAKE_CASE__ , encoding="utf-8" ) as vocab_handle:
A = json.load(SCREAMING_SNAKE_CASE__ )
with open(SCREAMING_SNAKE_CASE__ , encoding="utf-8" ) as vocab_handle:
A = json.load(SCREAMING_SNAKE_CASE__ )
A = r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
        # In v2 the character vocabulary had n_vocab=80; in v3 the '+' was dropped, so n_vocab=79
if len(self.lyrics_encoder ) == 79:
A = oov.replace(r"\-'" , r"\-+'" )
A = regex.compile(SCREAMING_SNAKE_CASE__ )
A = {v: k for k, v in self.artists_encoder.items()}
A = {v: k for k, v in self.genres_encoder.items()}
A = {v: k for k, v in self.lyrics_encoder.items()}
@property
def lowerCamelCase ( self :Any ):
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def lowerCamelCase ( self :List[Any] ):
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :Optional[int] , __UpperCamelCase :Any , __UpperCamelCase :int ):
A = [self.artists_encoder.get(SCREAMING_SNAKE_CASE__ , 0 ) for artist in list_artists]
for genres in range(len(SCREAMING_SNAKE_CASE__ ) ):
A = [self.genres_encoder.get(SCREAMING_SNAKE_CASE__ , 0 ) for genre in list_genres[genres]]
A = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
A = [[self.lyrics_encoder.get(SCREAMING_SNAKE_CASE__ , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :List[Any] ):
return list(SCREAMING_SNAKE_CASE__ )
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :List[Any] , __UpperCamelCase :Dict , __UpperCamelCase :Optional[int] , **__UpperCamelCase :Union[str, Any] ):
A = self.prepare_for_tokenization(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A = self._tokenize(SCREAMING_SNAKE_CASE__ )
return artist, genre, lyrics
def lowerCamelCase ( self :str , __UpperCamelCase :Any , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :List[str] , __UpperCamelCase :List[Any] = False ):
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
A = artists[idx].lower()
A = [genres[idx].lower()]
else:
A = self._normalize(artists[idx] ) + """.v2"""
A = [
self._normalize(SCREAMING_SNAKE_CASE__ ) + """.v2""" for genre in genres[idx].split("_" )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
A = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+" )
A = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
A = {vocab[index]: index + 1 for index in range(len(SCREAMING_SNAKE_CASE__ ) )}
A = 0
A = len(SCREAMING_SNAKE_CASE__ ) + 1
A = self.vocab
A = {v: k for k, v in self.vocab.items()}
A = """"""
else:
A = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+" )
A = self._run_strip_accents(SCREAMING_SNAKE_CASE__ )
A = lyrics.replace("\\" , "\n" )
A = self.out_of_vocab.sub("" , SCREAMING_SNAKE_CASE__ ), [], []
return artists, genres, lyrics
def lowerCamelCase ( self :Tuple , __UpperCamelCase :int ):
A = unicodedata.normalize("NFD" , SCREAMING_SNAKE_CASE__ )
A = []
for char in text:
A = unicodedata.category(SCREAMING_SNAKE_CASE__ )
if cat == "Mn":
continue
output.append(SCREAMING_SNAKE_CASE__ )
return "".join(SCREAMING_SNAKE_CASE__ )
def lowerCamelCase ( self :List[Any] , __UpperCamelCase :List[str] ):
A = (
[chr(SCREAMING_SNAKE_CASE__ ) for i in range(ord("a" ) , ord("z" ) + 1 )]
+ [chr(SCREAMING_SNAKE_CASE__ ) for i in range(ord("A" ) , ord("Z" ) + 1 )]
+ [chr(SCREAMING_SNAKE_CASE__ ) for i in range(ord("0" ) , ord("9" ) + 1 )]
+ ["""."""]
)
A = frozenset(SCREAMING_SNAKE_CASE__ )
A = re.compile(r"_+" )
A = """""".join([c if c in accepted else "_" for c in text.lower()] )
A = pattern.sub("_" , SCREAMING_SNAKE_CASE__ ).strip("_" )
return text
def lowerCamelCase ( self :Dict , __UpperCamelCase :List[Any] ):
return " ".join(SCREAMING_SNAKE_CASE__ )
def lowerCamelCase ( self :List[str] , __UpperCamelCase :str , __UpperCamelCase :Tuple = None , __UpperCamelCase :str = False ):
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
A = TensorType(SCREAMING_SNAKE_CASE__ )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
"Unable to convert output to TensorFlow tensors format, TensorFlow is not installed." )
import tensorflow as tf
A = tf.constant
A = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed." )
import torch
A = torch.tensor
A = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed." )
import jax.numpy as jnp # noqa: F811
A = jnp.array
A = _is_jax
else:
A = np.asarray
A = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
A = [inputs]
if not is_tensor(SCREAMING_SNAKE_CASE__ ):
A = as_tensor(SCREAMING_SNAKE_CASE__ )
except: # noqa E722
raise ValueError(
"Unable to create tensor, you should probably activate truncation and/or padding "
"with 'padding=True' 'truncation=True' to have batched tensors with the same length." )
return inputs
def __call__( self :Optional[int] , __UpperCamelCase :List[str] , __UpperCamelCase :Tuple , __UpperCamelCase :Union[str, Any]="" , __UpperCamelCase :Any="pt" ):
A = [0, 0, 0]
A = [artist] * len(self.version )
A = [genres] * len(self.version )
A = self.tokenize(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A = self._convert_token_to_id(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A = [-INFINITY] * len(full_tokens[-1] )
A = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=SCREAMING_SNAKE_CASE__ )
for i in range(len(self.version ) )
]
return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks} )
def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Optional[int] , __UpperCamelCase :Optional[int] = None ):
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
A = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"] )
with open(SCREAMING_SNAKE_CASE__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=SCREAMING_SNAKE_CASE__ ) )
A = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"] )
with open(SCREAMING_SNAKE_CASE__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=SCREAMING_SNAKE_CASE__ ) )
A = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"] )
with open(SCREAMING_SNAKE_CASE__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=SCREAMING_SNAKE_CASE__ ) )
return (artists_file, genres_file, lyrics_file)
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Any , __UpperCamelCase :Optional[Any] , __UpperCamelCase :Union[str, Any] ):
A = self.artists_decoder.get(SCREAMING_SNAKE_CASE__ )
A = [self.genres_decoder.get(SCREAMING_SNAKE_CASE__ ) for genre in genres_index]
A = [self.lyrics_decoder.get(SCREAMING_SNAKE_CASE__ ) for character in lyric_index]
return artist, genres, lyrics
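# Hedged usage sketch (the class above is the Jukebox tokenizer under an obfuscated name;
# the checkpoint and strings below are illustrative):
#   tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
#   inputs = tokenizer("Alan Jackson", "Country Rock", "old town road")
#   # inputs["input_ids"] holds one tensor per prior, i.e. three for the ["v3", "v2", "v2"] setup.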
| 292 |
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
UpperCAmelCase__ : Optional[int] = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
UpperCAmelCase__ : List[Any] = logging.WARNING
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.getenv("""DATASETS_VERBOSITY""" ,_snake_case )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f'''Unknown option DATASETS_VERBOSITY={env_level_str}, '''
f'''has to be one of: { ', '.join(log_levels.keys() ) }''' )
return _default_log_level
def lowercase_ ( ):
return __name__.split(""".""" )[0]
def lowercase_ ( ):
return logging.getLogger(_get_library_name() )
def lowercase_ ( ):
# Apply our default configuration to the library root logger.
SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def lowercase_ ( _snake_case = None ):
if name is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_name()
return logging.getLogger(_snake_case )
def lowercase_ ( ):
return _get_library_root_logger().getEffectiveLevel()
def lowercase_ ( _snake_case ):
_get_library_root_logger().setLevel(_snake_case )
def lowercase_ ( ):
return set_verbosity(_snake_case )
def lowercase_ ( ):
return set_verbosity(_snake_case )
def lowercase_ ( ):
return set_verbosity(_snake_case )
def lowercase_ ( ):
return set_verbosity(_snake_case )
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Tuple = False
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : str = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class lowerCAmelCase_ :
"""simple docstring"""
def __init__(self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> int: # pylint: disable=unused-argument
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = args[0] if args else None
def __iter__(self ) -> int:
"""simple docstring"""
return iter(self._iterator )
def __getattr__(self , SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
def empty_fn(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__(self ) -> Dict:
"""simple docstring"""
return self
def __exit__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
return
UpperCAmelCase__ : str = True
class lowerCAmelCase_ :
"""simple docstring"""
def __call__(self , *SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , **SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
else:
return EmptyTqdm(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
UpperCAmelCase__ : Tuple = _tqdm_cls()
def lowercase_ ( ):
global _tqdm_active
return bool(_tqdm_active )
def lowercase_ ( ):
global _tqdm_active
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
def lowercase_ ( ):
global _tqdm_active
SCREAMING_SNAKE_CASE__ : str = False
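# Hedged usage sketch (the function names above were rewritten by the renaming pass; in the
# published `datasets` library the equivalents are set_verbosity, the set_verbosity_* shortcuts,
# and enable/disable_progress_bar):
#   from datasets.utils import logging as ds_logging
#   ds_logging.set_verbosity(ds_logging.DEBUG)
#   ds_logging.disable_progress_bar()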
| 25 | 0 |
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
A__: Dict = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
A__: Any = 5_0003
A__: Tuple = 5_0002
@require_sentencepiece
@require_tokenizers
class _a ( a__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = PLBartTokenizer
UpperCamelCase__ = None
UpperCamelCase__ = False
def UpperCAmelCase_ ( self: str ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
UpperCamelCase__: List[Any] = PLBartTokenizer(SCREAMING_SNAKE_CASE__ , language_codes="base" , keep_accents=SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self: int ):
'''simple docstring'''
UpperCamelCase__: Tuple = PLBartTokenizer(SCREAMING_SNAKE_CASE__ , language_codes="base" , keep_accents=SCREAMING_SNAKE_CASE__ )
UpperCamelCase__: Tuple = tokenizer.tokenize("This is a test" )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCamelCase__: int = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCamelCase__: int = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCamelCase__: Any = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
UpperCamelCase__: Dict = tokenizer.vocab_size
UpperCamelCase__: Union[str, Any] = [tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ) for x in range(end - 4 , SCREAMING_SNAKE_CASE__ )]
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ["__java__", "__python__", "__en_XX__", "<mask>"] )
UpperCamelCase__: str = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
UpperCamelCase__: Tuple = tokenizer(SCREAMING_SNAKE_CASE__ ).input_ids
self.assertEqual(
tokenizer.decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , )
def UpperCAmelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__: Any = PLBartTokenizer(SCREAMING_SNAKE_CASE__ , language_codes="multi" , keep_accents=SCREAMING_SNAKE_CASE__ )
UpperCamelCase__: Tuple = tokenizer.tokenize("This is a test" )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCamelCase__: Union[str, Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCamelCase__: Tuple = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCamelCase__: Dict = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
UpperCamelCase__: List[Any] = tokenizer.vocab_size
UpperCamelCase__: Dict = [tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ) for x in range(end - 7 , SCREAMING_SNAKE_CASE__ )]
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] )
UpperCamelCase__: str = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
UpperCamelCase__: str = tokenizer(SCREAMING_SNAKE_CASE__ ).input_ids
self.assertEqual(
tokenizer.decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _a ( unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = '''uclanlp/plbart-python-en_XX'''
UpperCamelCase__ = [
'''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
'''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
]
UpperCamelCase__ = [
'''Returns the maximum value of a b c.''',
'''Sums the values of a b c.''',
]
UpperCamelCase__ = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def UpperCAmelCase_ ( cls: List[Any] ):
'''simple docstring'''
UpperCamelCase__: PLBartTokenizer = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX" )
UpperCamelCase__: Dict = 1
return cls
def UpperCAmelCase_ ( self: Optional[Any] ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 5_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 5_0002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 5_0003 )
def UpperCAmelCase_ ( self: Any ):
'''simple docstring'''
UpperCamelCase__: Dict = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE__ )
def UpperCAmelCase_ ( self: Tuple ):
'''simple docstring'''
self.assertIn(SCREAMING_SNAKE_CASE__ , self.tokenizer.all_special_ids )
UpperCamelCase__: int = [EN_CODE, 9037, 3_3442, 57, 752, 153, 14, 56, 18, 9, 2]
UpperCamelCase__: Tuple = self.tokenizer.decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
UpperCamelCase__: int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertNotIn(self.tokenizer.eos_token , SCREAMING_SNAKE_CASE__ )
def UpperCAmelCase_ ( self: str ):
'''simple docstring'''
UpperCamelCase__: Optional[Any] = ["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20]
self.assertIsInstance(src_text[0] , SCREAMING_SNAKE_CASE__ )
UpperCamelCase__: Tuple = 10
UpperCamelCase__: Dict = self.tokenizer(SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , SCREAMING_SNAKE_CASE__ )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def UpperCAmelCase_ ( self: Optional[Any] ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [5_0004, 5_0001] )
def UpperCAmelCase_ ( self: int ):
'''simple docstring'''
UpperCamelCase__: Any = tempfile.mkdtemp()
UpperCamelCase__: Any = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
UpperCamelCase__: List[Any] = PLBartTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , SCREAMING_SNAKE_CASE__ )
@require_torch
def UpperCAmelCase_ ( self: str ):
'''simple docstring'''
UpperCamelCase__: List[Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE__ , return_tensors="pt" )
UpperCamelCase__: Union[str, Any] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , SCREAMING_SNAKE_CASE__ )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
UpperCamelCase__: Optional[int] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
UpperCamelCase__: Optional[Any] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
UpperCamelCase__: Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE__ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def UpperCAmelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__: Dict = self.tokenizer(self.src_text , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=3 , return_tensors="pt" )
UpperCamelCase__: int = self.tokenizer(
text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=10 , return_tensors="pt" )
UpperCamelCase__: Optional[int] = targets["""input_ids"""]
UpperCamelCase__: Dict = shift_tokens_right(SCREAMING_SNAKE_CASE__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCAmelCase_ ( self: Dict ):
'''simple docstring'''
UpperCamelCase__: Union[str, Any] = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , {
# A, test, EOS, en_XX
"input_ids": [[150, 242, 2, 5_0003]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 5_0001,
} , )
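# The expected dict above matches the upstream _build_translation_inputs behaviour: the source
# text is tokenized, EOS (2) plus the source-language code (__en_XX__, 50003) are appended, and
# forced_bos_token_id is set to the target-language code (__java__, 50001).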
| 149 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : Optional[int] = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : int = '''yolos'''
def __init__(self , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=[5_12, 8_64] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=1_00 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , **SCREAMING_SNAKE_CASE__ , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE__ : int = num_hidden_layers
SCREAMING_SNAKE_CASE__ : str = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[str] = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE__ : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE__ : List[str] = image_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE__ : List[str] = num_channels
SCREAMING_SNAKE_CASE__ : List[str] = qkv_bias
SCREAMING_SNAKE_CASE__ : Optional[int] = num_detection_tokens
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_mid_position_embeddings
SCREAMING_SNAKE_CASE__ : List[str] = auxiliary_loss
# Hungarian matcher
SCREAMING_SNAKE_CASE__ : Optional[Any] = class_cost
SCREAMING_SNAKE_CASE__ : List[str] = bbox_cost
SCREAMING_SNAKE_CASE__ : List[Any] = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE__ : Optional[Any] = bbox_loss_coefficient
SCREAMING_SNAKE_CASE__ : List[str] = giou_loss_coefficient
SCREAMING_SNAKE_CASE__ : int = eos_coefficient
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Dict = version.parse('''1.11''' )
@property
def __magic_name__ (self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __magic_name__ (self ) -> float:
"""simple docstring"""
return 1E-4
@property
def __magic_name__ (self ) -> int:
"""simple docstring"""
return 12
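# In the ONNX export config above, the ordered mapping names the pixel_values input axes,
# the 1e-4 float is the absolute tolerance used when validating an exported model, and 12 is
# the default ONNX opset in the upstream implementation.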
| 25 | 0 |
a : Tuple = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
a : str = frozenset(["prompt", "negative_prompt"])
a : List[Any] = frozenset([])
a : List[Any] = frozenset(["image"])
a : Any = frozenset(
[
"image",
"height",
"width",
"guidance_scale",
]
)
a : List[str] = frozenset(["image"])
a : int = frozenset(
[
"prompt",
"image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
a : Dict = frozenset(["prompt", "image", "negative_prompt"])
a : Optional[int] = frozenset(
[
# Text guided image variation with an image mask
"prompt",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
a : Optional[int] = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
a : Optional[int] = frozenset(
[
# image variation with an image mask
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
a : Union[str, Any] = frozenset(["image", "mask_image"])
a : Optional[int] = frozenset(
[
"example_image",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
a : Tuple = frozenset(["example_image", "image", "mask_image"])
a : Optional[int] = frozenset(["class_labels"])
a : List[Any] = frozenset(["class_labels"])
a : str = frozenset(["batch_size"])
a : Any = frozenset([])
a : List[Any] = frozenset(["batch_size"])
a : Union[str, Any] = frozenset([])
a : Dict = frozenset(
[
"prompt",
"audio_length_in_s",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
a : Union[str, Any] = frozenset(["prompt", "negative_prompt"])
a : Tuple = frozenset(["input_tokens"])
a : List[Any] = frozenset(["input_tokens"])
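# These frozensets mirror diffusers' shared pipeline-test parameters (the variable names were
# obfuscated above): each set lists the __call__ arguments a pipeline family is expected to
# accept, the smaller companion sets list the arguments that may be batched, and the common
# pipeline tests compare a concrete pipeline's signature against the matching set.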
| 114 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : List[Any] = 384
SCREAMING_SNAKE_CASE__ : Tuple = 7
if "tiny" in model_name:
SCREAMING_SNAKE_CASE__ : int = 96
SCREAMING_SNAKE_CASE__ : str = (2, 2, 6, 2)
SCREAMING_SNAKE_CASE__ : List[Any] = (3, 6, 12, 24)
elif "small" in model_name:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 96
SCREAMING_SNAKE_CASE__ : Any = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE__ : Tuple = (3, 6, 12, 24)
elif "base" in model_name:
SCREAMING_SNAKE_CASE__ : Tuple = 128
SCREAMING_SNAKE_CASE__ : List[Any] = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE__ : int = (4, 8, 16, 32)
SCREAMING_SNAKE_CASE__ : Optional[int] = 12
SCREAMING_SNAKE_CASE__ : Optional[int] = 512
elif "large" in model_name:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 192
SCREAMING_SNAKE_CASE__ : int = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE__ : int = (6, 12, 24, 48)
SCREAMING_SNAKE_CASE__ : List[Any] = 12
SCREAMING_SNAKE_CASE__ : Optional[Any] = 768
# set label information
SCREAMING_SNAKE_CASE__ : Optional[Any] = 150
SCREAMING_SNAKE_CASE__ : Tuple = """huggingface/label-files"""
SCREAMING_SNAKE_CASE__ : List[str] = """ade20k-id2label.json"""
SCREAMING_SNAKE_CASE__ : str = json.load(open(hf_hub_download(_snake_case ,_snake_case ,repo_type="""dataset""" ) ,"""r""" ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {int(_snake_case ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : List[Any] = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : str = SwinConfig(
embed_dim=_snake_case ,depths=_snake_case ,num_heads=_snake_case ,window_size=_snake_case ,out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ,)
SCREAMING_SNAKE_CASE__ : int = UperNetConfig(
backbone_config=_snake_case ,auxiliary_in_channels=_snake_case ,num_labels=_snake_case ,idalabel=_snake_case ,labelaid=_snake_case ,)
return config
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = dct.pop(_snake_case )
SCREAMING_SNAKE_CASE__ : Tuple = val
def lowercase_ ( _snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : int = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ : List[Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ : Tuple = in_proj_weight[:dim, :]
SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[: dim]
SCREAMING_SNAKE_CASE__ : Optional[int] = in_proj_weight[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[
dim : dim * 2
]
SCREAMING_SNAKE_CASE__ : Tuple = in_proj_weight[
-dim :, :
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = in_proj_bias[-dim :]
# fmt: on
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = x.shape
SCREAMING_SNAKE_CASE__ : List[Any] = x.reshape(_snake_case ,4 ,in_channel // 4 )
SCREAMING_SNAKE_CASE__ : Dict = x[:, [0, 2, 1, 3], :].transpose(1 ,2 ).reshape(_snake_case ,_snake_case )
return x
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = x.shape
SCREAMING_SNAKE_CASE__ : Any = x.reshape(_snake_case ,in_channel // 4 ,4 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = x[:, :, [0, 2, 1, 3]].transpose(1 ,2 ).reshape(_snake_case ,_snake_case )
return x
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Tuple = x.shape[0]
SCREAMING_SNAKE_CASE__ : List[str] = x.reshape(4 ,in_channel // 4 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = x[[0, 2, 1, 3], :].transpose(0 ,1 ).reshape(_snake_case )
return x
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : int = x.shape[0]
SCREAMING_SNAKE_CASE__ : List[str] = x.reshape(in_channel // 4 ,4 )
SCREAMING_SNAKE_CASE__ : Tuple = x[:, [0, 2, 1, 3]].transpose(0 ,1 ).reshape(_snake_case )
return x
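# The four helpers above exist because the original Swin checkpoints concatenate the 2x2
# patch-merging window in a different order than the Hugging Face implementation, so the
# downsample reduction/norm weights are re-permuted (the [0, 2, 1, 3] index swap) before loading.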
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : List[Any] = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
SCREAMING_SNAKE_CASE__ : Optional[int] = model_name_to_url[model_name]
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.hub.load_state_dict_from_url(_snake_case ,map_location="""cpu""" ,file_name=_snake_case )[
"""state_dict"""
]
for name, param in state_dict.items():
print(_snake_case ,param.shape )
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_upernet_config(_snake_case )
SCREAMING_SNAKE_CASE__ : List[str] = UperNetForSemanticSegmentation(_snake_case )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE__ : Optional[int] = state_dict.pop(_snake_case )
if "bn" in key:
SCREAMING_SNAKE_CASE__ : Optional[int] = key.replace("""bn""" ,"""batch_norm""" )
SCREAMING_SNAKE_CASE__ : Dict = val
# rename keys
SCREAMING_SNAKE_CASE__ : str = create_rename_keys(_snake_case )
for src, dest in rename_keys:
rename_key(_snake_case ,_snake_case ,_snake_case )
read_in_q_k_v(_snake_case ,config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = reverse_correct_unfold_reduction_order(_snake_case )
if "norm" in key:
SCREAMING_SNAKE_CASE__ : Tuple = reverse_correct_unfold_norm_order(_snake_case )
model.load_state_dict(_snake_case )
# verify on image
SCREAMING_SNAKE_CASE__ : List[str] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
SCREAMING_SNAKE_CASE__ : str = Image.open(requests.get(_snake_case ,stream=_snake_case ).raw ).convert("""RGB""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = SegformerImageProcessor()
SCREAMING_SNAKE_CASE__ : Optional[int] = processor(_snake_case ,return_tensors="""pt""" ).pixel_values
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Tuple = model(_snake_case )
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.logits
print(logits.shape )
print("""First values of logits:""" ,logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
elif model_name == "upernet-swin-small":
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(
[[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
elif model_name == "upernet-swin-base":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor(
[[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
elif model_name == "upernet-swin-large":
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor(
[[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
print("""Logits:""" ,outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] ,_snake_case ,atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_snake_case )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_snake_case )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
UpperCAmelCase__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-swin-tiny',
type=str,
choices=[f"""upernet-swin-{size}""" for size in ['tiny', 'small', 'base', 'large']],
help='Name of the Swin + UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
UpperCAmelCase__ : List[str] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
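# Illustrative invocation (the script file name is an assumption):
#   python convert_upernet_swin_to_pytorch.py --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny --push_to_hub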
| 25 | 0 |
from ..utils import DummyObject, requires_backends
class lowerCamelCase__ ( metaclass=a__):
SCREAMING_SNAKE_CASE__ = ['''torch''', '''torchsde''']
def __init__(self , *UpperCAmelCase , **UpperCAmelCase ) -> Any:
requires_backends(self , ['''torch''', '''torchsde'''] )
@classmethod
def __A (cls , *UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ['''torch''', '''torchsde'''] )
@classmethod
def __A (cls , *UpperCAmelCase , **UpperCAmelCase ) -> str:
requires_backends(cls , ['''torch''', '''torchsde'''] )
| 5 |
"""simple docstring"""
import math
import unittest
def lowercase_ ( _snake_case ):
assert isinstance(_snake_case ,_snake_case ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 ,int(math.sqrt(_snake_case ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
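# Spot check of the 6k +/- 1 loop: for 97 the trial divisors are only 5 and 7 (both below
# sqrt(97) ~ 9.85), neither divides 97, so 97 is reported prime; 91 fails at 7 (91 == 7 * 13).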
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 25 | 0 |
"""simple docstring"""
import os
__SCREAMING_SNAKE_CASE ={'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase_ : Tuple = 0
lowercase_ : Dict = 0
while index < len(_snake_case ) - 1:
lowercase_ : List[Any] = SYMBOLS[numerals[index]]
lowercase_ : str = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
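# e.g. "XIV" -> 10 - 1 + 5 = 14; the running subtraction handles subtractive pairs such as IV and XC.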
def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] ):
lowercase_ : Union[str, Any] = """"""
lowercase_ : str = num // 10_00
numerals += m_count * "M"
num %= 10_00
lowercase_ : str = num // 1_00
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_00
lowercase_ : int = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
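# e.g. 1994 -> "MCMXCIV" (M + CM + XC + IV), the minimal form that Project Euler problem 89 asks for.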
def lowercase__( __SCREAMING_SNAKE_CASE : int = "/p089_roman.txt" ):
lowercase_ : Dict = 0
with open(os.path.dirname(_snake_case ) + roman_numerals_filename ) as filea:
lowercase_ : Optional[int] = filea.readlines()
for line in lines:
lowercase_ : List[Any] = line.strip()
lowercase_ : str = parse_roman_numerals(_snake_case )
lowercase_ : Optional[int] = generate_roman_numerals(_snake_case )
savings += len(_snake_case ) - len(_snake_case )
return savings
if __name__ == "__main__":
print(F"{solution() = }")
| 213 |
"""simple docstring"""
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[int] = [1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = 0, 0, 0
SCREAMING_SNAKE_CASE__ : List[str] = ugly_nums[ia] * 2
SCREAMING_SNAKE_CASE__ : int = ugly_nums[ia] * 3
SCREAMING_SNAKE_CASE__ : Any = ugly_nums[ia] * 5
for _ in range(1 ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = min(_snake_case ,_snake_case ,_snake_case )
ugly_nums.append(_snake_case )
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE__ : Optional[int] = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE__ : List[str] = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE__ : Tuple = ugly_nums[ia] * 5
return ugly_nums[-1]
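# The first ten ugly numbers are 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, so a call with n=10 returns 12;
# the three moving indices merge the x2 / x3 / x5 streams in O(n) without duplicates.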
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f"""{ugly_numbers(2_0_0) = }""")
| 25 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A = {
'configuration_albert': ['ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AlbertConfig', 'AlbertOnnxConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ['AlbertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ['AlbertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'AlbertForMaskedLM',
'AlbertForMultipleChoice',
'AlbertForPreTraining',
'AlbertForQuestionAnswering',
'AlbertForSequenceClassification',
'AlbertForTokenClassification',
'AlbertModel',
'AlbertPreTrainedModel',
'load_tf_weights_in_albert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_albert'] = [
'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAlbertForMaskedLM',
'TFAlbertForMultipleChoice',
'TFAlbertForPreTraining',
'TFAlbertForQuestionAnswering',
'TFAlbertForSequenceClassification',
'TFAlbertForTokenClassification',
'TFAlbertMainLayer',
'TFAlbertModel',
'TFAlbertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_albert'] = [
'FlaxAlbertForMaskedLM',
'FlaxAlbertForMultipleChoice',
'FlaxAlbertForPreTraining',
'FlaxAlbertForQuestionAnswering',
'FlaxAlbertForSequenceClassification',
'FlaxAlbertForTokenClassification',
'FlaxAlbertModel',
'FlaxAlbertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : Dict = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class ASTConfig(PretrainedConfig):
    """simple docstring"""

    model_type = 'audio-spectrogram-transformer'

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, patch_size=16, qkv_bias=True, frequency_stride=10, time_stride=10, max_length=1024, num_mel_bins=128, **kwargs) -> None:
        """simple docstring"""
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
| 25 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
a : str = logging.get_logger(__name__)
a : int = {
'CarlCochet/trajectory-transformer-halfcheetah-medium-v2': (
'https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):

    model_type = 'trajectory_transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(self, vocab_size=100, action_weight=5, reward_weight=1, value_weight=1, block_size=249, action_dim=6, observation_dim=17, transition_dim=25, n_layer=4, n_head=4, n_embd=128, embd_pdrop=0.1, attn_pdrop=0.1, resid_pdrop=0.1, learning_rate=0.0006, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, kaiming_initializer_range=1, use_cache=True, pad_token_id=1, bos_token_id=50256, eos_token_id=50256, **kwargs) -> None:
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 147 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def lowercase_ ( _snake_case ):
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def lowercase_ ( _snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Any = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
SCREAMING_SNAKE_CASE__ : Optional[int] = key.replace("""heads.cmd.mim_head.cls.predictions""" ,"""mmm_image_head""" )
SCREAMING_SNAKE_CASE__ : Dict = key.replace("""heads.cmd.mlm_head.cls.predictions""" ,"""mmm_text_head""" )
SCREAMING_SNAKE_CASE__ : List[Any] = key.replace("""heads.cmd.itm_head.cls""" ,"""itm_head""" )
SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""heads.cmd.itm_head.pooler""" ,"""itm_head.pooler""" )
SCREAMING_SNAKE_CASE__ : int = key.replace("""heads.cmd.clip_head.logit_scale""" ,"""flava.logit_scale""" )
SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""heads.fairseq_mlm.cls.predictions""" ,"""mlm_head""" )
SCREAMING_SNAKE_CASE__ : str = key.replace("""heads.imagenet.mim_head.cls.predictions""" ,"""mim_head""" )
SCREAMING_SNAKE_CASE__ : List[str] = key.replace("""mm_text_projection""" ,"""flava.text_to_mm_projection""" )
SCREAMING_SNAKE_CASE__ : Dict = key.replace("""mm_image_projection""" ,"""flava.image_to_mm_projection""" )
SCREAMING_SNAKE_CASE__ : str = key.replace("""image_encoder.module""" ,"""flava.image_model""" )
SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""text_encoder.module""" ,"""flava.text_model""" )
SCREAMING_SNAKE_CASE__ : int = key.replace("""mm_encoder.module.encoder.cls_token""" ,"""flava.multimodal_model.cls_token""" )
SCREAMING_SNAKE_CASE__ : Dict = key.replace("""mm_encoder.module""" ,"""flava.multimodal_model""" )
SCREAMING_SNAKE_CASE__ : Any = key.replace("""text_projection""" ,"""flava.text_projection""" )
SCREAMING_SNAKE_CASE__ : List[Any] = key.replace("""image_projection""" ,"""flava.image_projection""" )
SCREAMING_SNAKE_CASE__ : Tuple = value.float()
for key, value in codebook_state_dict.items():
SCREAMING_SNAKE_CASE__ : Optional[Any] = value
return upgrade
@torch.no_grad()
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case=None ):
if config_path is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = FlavaConfig.from_pretrained(_snake_case )
else:
SCREAMING_SNAKE_CASE__ : List[str] = FlavaConfig()
SCREAMING_SNAKE_CASE__ : Optional[int] = FlavaForPreTraining(_snake_case ).eval()
SCREAMING_SNAKE_CASE__ : List[Any] = convert_dalle_checkpoint(_snake_case ,_snake_case ,save_checkpoint=_snake_case )
if os.path.exists(_snake_case ):
SCREAMING_SNAKE_CASE__ : List[str] = torch.load(_snake_case ,map_location="""cpu""" )
else:
SCREAMING_SNAKE_CASE__ : Tuple = torch.hub.load_state_dict_from_url(_snake_case ,map_location="""cpu""" )
SCREAMING_SNAKE_CASE__ : Dict = upgrade_state_dict(_snake_case ,_snake_case )
hf_model.load_state_dict(_snake_case )
SCREAMING_SNAKE_CASE__ : Any = hf_model.state_dict()
SCREAMING_SNAKE_CASE__ : Any = count_parameters(_snake_case )
SCREAMING_SNAKE_CASE__ : str = count_parameters(_snake_case ) + count_parameters(_snake_case )
assert torch.allclose(_snake_case ,_snake_case ,atol=1E-3 )
hf_model.save_pretrained(_snake_case )
if __name__ == "__main__":
UpperCAmelCase__ : List[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
UpperCAmelCase__ : Optional[int] = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 25 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    """simple docstring"""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    """simple docstring"""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    """simple docstring"""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    """simple docstring"""
    return sum(compute_truncated_primes(11))
if __name__ == "__main__":
print(F'''{sum(compute_truncated_primes(1_1)) = }''')
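# Added illustrative check (assumption: 3797 is a known truncatable prime —
# 3797, 797, 97, 7 and 3797, 379, 37, 3 are all prime).
assert validate(3797) and all(is_prime(i) for i in list_truncated_nums(3797))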
| 289 |
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = 'Hello world! cécé herlolip'
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : int = FairseqRobertaModel.from_pretrained(_snake_case )
roberta.eval() # disable dropout
SCREAMING_SNAKE_CASE__ : Any = roberta.model.encoder.sentence_encoder
SCREAMING_SNAKE_CASE__ : Any = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,hidden_size=roberta.cfg.model.encoder_embed_dim ,num_hidden_layers=roberta.cfg.model.encoder_layers ,num_attention_heads=roberta.cfg.model.encoder_attention_heads ,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1E-5 ,)
if classification_head:
SCREAMING_SNAKE_CASE__ : Dict = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our RoBERTa config:""" ,_snake_case )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = XLMRobertaXLForSequenceClassification(_snake_case ) if classification_head else XLMRobertaXLForMaskedLM(_snake_case )
model.eval()
# Now let's copy all the weights.
# Embeddings
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_sent_encoder.embed_tokens.weight
SCREAMING_SNAKE_CASE__ : int = roberta_sent_encoder.embed_positions.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_sent_encoder.layer_norm.weight
SCREAMING_SNAKE_CASE__ : Dict = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
SCREAMING_SNAKE_CASE__ : BertLayer = model.roberta.encoder.layer[i]
SCREAMING_SNAKE_CASE__ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
SCREAMING_SNAKE_CASE__ : RobertaAttention = layer.attention
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn_layer_norm.weight
SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.self_attn_layer_norm.bias
# self attention
SCREAMING_SNAKE_CASE__ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.q_proj.weight
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.self_attn.q_proj.bias
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.self_attn.k_proj.weight
SCREAMING_SNAKE_CASE__ : int = roberta_layer.self_attn.k_proj.bias
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.v_proj.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.v_proj.bias
# self-attention output
SCREAMING_SNAKE_CASE__ : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.out_proj.weight
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.final_layer_norm.weight
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_layer.final_layer_norm.bias
# intermediate
SCREAMING_SNAKE_CASE__ : BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.fca.weight
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.fca.bias
# output
SCREAMING_SNAKE_CASE__ : BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.fca.weight
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_layer.fca.bias
# end of layer
if classification_head:
SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.classification_heads["""mnli"""].dense.weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["""mnli"""].dense.bias
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["""mnli"""].out_proj.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
SCREAMING_SNAKE_CASE__ : str = roberta.model.encoder.lm_head.dense.weight
SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.encoder.lm_head.dense.bias
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.weight
SCREAMING_SNAKE_CASE__ : Dict = roberta.model.encoder.lm_head.layer_norm.bias
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta.model.encoder.lm_head.weight
SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
SCREAMING_SNAKE_CASE__ : torch.Tensor = roberta.encode(_snake_case ).unsqueeze(0 ) # batch of size 1
SCREAMING_SNAKE_CASE__ : Tuple = model(_snake_case )[0]
if classification_head:
SCREAMING_SNAKE_CASE__ : Dict = roberta.model.classification_heads["""mnli"""](roberta.extract_features(_snake_case ) )
else:
SCREAMING_SNAKE_CASE__ : Tuple = roberta.model(_snake_case )[0]
print(our_output.shape ,their_output.shape )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
SCREAMING_SNAKE_CASE__ : Tuple = torch.allclose(_snake_case ,_snake_case ,atol=1E-3 )
print("""Do both models output the same tensors?""" ,"""🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
pathlib.Path(_snake_case ).mkdir(parents=_snake_case ,exist_ok=_snake_case )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_snake_case )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
UpperCAmelCase__ : Any = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 25 | 0 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
_snake_case = {
"susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
"susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}
class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(self, vocab_size: int = 250002, hidden_size: int = 768, num_hidden_layers: int = 12, num_attention_heads: int = 12, intermediate_size: int = 3072, hidden_act: str = "gelu", hidden_dropout_prob: float = 0.1, attention_probs_dropout_prob: float = 0.1, max_position_embeddings: int = 514, initializer_range: float = 0.02, pad_token_id: int = 1, layer_norm_eps: float = 1e-05, classifier_dropout=None, is_decoder=False, act_dropout=0.0, **kwargs) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
| 26 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
},
"tokenizer_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
},
}
_snake_case = {
"google/fnet-base": 512,
"google/fnet-large": 512,
}
_snake_case = "▁"
class lowercase ( UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = ["input_ids", "token_type_ids"]
_a = FNetTokenizer
def __init__( self , _a=None , _a=None , _a=False , _a=True , _a=True , _a="<unk>" , _a="[SEP]" , _a="<pad>" , _a="[CLS]" , _a="[MASK]" , **_a , ) -> Optional[int]:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
_A : int = (
AddedToken(_a , lstrip=_a , rstrip=_a , normalized=_a )
if isinstance(_a , _a )
else mask_token
)
super().__init__(
_a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , **_a , )
_A : Optional[int] = do_lower_case
_A : List[Any] = remove_space
_A : str = keep_accents
_A : int = vocab_file
_A : int = False if not self.vocab_file else True
def a__ ( self , _a , _a = None ) -> List[int]:
_A : str = [self.sep_token_id]
_A : Dict = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def a__ ( self , _a , _a = None ) -> List[int]:
_A : Any = [self.sep_token_id]
_A : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a__ ( self , _a , _a = None ) -> Tuple[str]:
if not os.path.isdir(_a ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A : List[str] = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
return (out_vocab_file,)
| 26 | 1 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 26 |
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # Reduce latitudes onto the auxiliary sphere using the ellipsoid flattening
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
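# Added minimal check (illustrative): the distance from a point to itself is 0.
assert haversine_distance(37.774856, -122.424227, 37.774856, -122.424227) == 0.0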
| 26 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class lowercase ( UpperCamelCase__ ):
_a = "xmod"
def __init__( self , _a=3_0522 , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1e-12 , _a=1 , _a=0 , _a=2 , _a="absolute" , _a=True , _a=None , _a=False , _a=2 , _a=False , _a=True , _a=True , _a=("en_XX",) , _a=None , **_a , ) -> str:
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
_A : Tuple = vocab_size
_A : Union[str, Any] = hidden_size
_A : Dict = num_hidden_layers
_A : Dict = num_attention_heads
_A : List[Any] = hidden_act
_A : Optional[Any] = intermediate_size
_A : Any = hidden_dropout_prob
_A : str = attention_probs_dropout_prob
_A : Dict = max_position_embeddings
_A : Any = type_vocab_size
_A : List[Any] = initializer_range
_A : int = layer_norm_eps
_A : int = position_embedding_type
_A : Any = use_cache
_A : int = classifier_dropout
_A : int = pre_norm
_A : Optional[Any] = adapter_reduction_factor
_A : List[Any] = adapter_layer_norm
_A : Optional[int] = adapter_reuse_layer_norm
_A : Any = ln_before_adapter
_A : Union[str, Any] = list(_a )
_A : List[Any] = default_language
class lowercase ( UpperCamelCase__ ):
@property
def a__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_A : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_A : Dict = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 26 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",UpperCamelCase__,)
class lowercase ( UpperCamelCase__ ):
_a = RobertaConfig
_a = "roberta"
def __init__( self , _a ) -> Optional[int]:
super().__init__(_a )
_A : Union[str, Any] = RobertaEmbeddings(_a )
self.init_weights()
@add_start_docstrings(
"RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ",UpperCamelCase__,)
class lowercase ( UpperCamelCase__ ):
_a = RobertaConfig
_a = "roberta"
def __init__( self , _a ) -> str:
super().__init__(_a )
_A : Any = config.num_labels
_A : Dict = config.num_hidden_layers
_A : List[str] = DeeRobertaModel(_a )
_A : int = nn.Dropout(config.hidden_dropout_prob )
_A : int = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(_a )
def a__ ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=-1 , _a=False , ) -> Any:
_A : Optional[int] = self.num_layers
try:
_A : List[str] = self.roberta(
_a , attention_mask=_a , token_type_ids=_a , position_ids=_a , head_mask=_a , inputs_embeds=_a , )
_A : List[str] = outputs[1]
_A : List[str] = self.dropout(_a )
_A : Optional[Any] = self.classifier(_a )
_A : List[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_A : List[Any] = e.message
_A : Optional[int] = e.exit_layer
_A : Optional[int] = outputs[0]
if not self.training:
_A : int = entropy(_a )
_A : int = []
_A : int = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_A : Union[str, Any] = MSELoss()
_A : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_A : List[Any] = CrossEntropyLoss()
_A : Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_A : Optional[Any] = []
for highway_exit in outputs[-1]:
_A : Tuple = highway_exit[0]
if not self.training:
highway_logits_all.append(_a )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_A : List[str] = MSELoss()
_A : Optional[int] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_A : List[Any] = CrossEntropyLoss()
_A : Tuple = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_a )
if train_highway:
_A : Dict = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_A : int = (loss,) + outputs
if not self.training:
_A : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_A : Union[str, Any] = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 26 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> Optional[Any]:
_A : str = tempfile.mkdtemp()
_A : List[Any] = BlipImageProcessor()
_A : Optional[Any] = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
_A : Tuple = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
_A : Any = InstructBlipProcessor(_a , _a , _a )
processor.save_pretrained(self.tmpdirname )
def a__ ( self , **_a ) -> Optional[Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).tokenizer
def a__ ( self , **_a ) -> int:
return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).image_processor
def a__ ( self , **_a ) -> Any:
return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).qformer_tokenizer
def a__ ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def a__ ( self ) -> Optional[Any]:
_A : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_A : Any = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a__ ( self ) -> Union[str, Any]:
_A : Tuple = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
_A : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_A : Tuple = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
_A : Tuple = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
self.assertIsInstance(processor.qformer_tokenizer , _a )
def a__ ( self ) -> Dict:
_A : int = self.get_image_processor()
_A : Dict = self.get_tokenizer()
_A : Dict = self.get_qformer_tokenizer()
_A : int = InstructBlipProcessor(
tokenizer=_a , image_processor=_a , qformer_tokenizer=_a )
_A : Optional[Any] = self.prepare_image_inputs()
_A : List[Any] = image_processor(_a , return_tensors="""np""" )
_A : str = processor(images=_a , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def a__ ( self ) -> Union[str, Any]:
_A : Optional[int] = self.get_image_processor()
_A : Optional[int] = self.get_tokenizer()
_A : List[Any] = self.get_qformer_tokenizer()
_A : List[Any] = InstructBlipProcessor(
tokenizer=_a , image_processor=_a , qformer_tokenizer=_a )
_A : Optional[int] = """lower newer"""
_A : List[str] = processor(text=_a )
_A : str = tokenizer(_a , return_token_type_ids=_a )
_A : List[str] = qformer_tokenizer(_a , return_token_type_ids=_a )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def a__ ( self ) -> List[str]:
_A : Any = self.get_image_processor()
_A : List[str] = self.get_tokenizer()
_A : Dict = self.get_qformer_tokenizer()
_A : List[str] = InstructBlipProcessor(
tokenizer=_a , image_processor=_a , qformer_tokenizer=_a )
_A : Tuple = """lower newer"""
_A : Optional[int] = self.prepare_image_inputs()
_A : Any = processor(text=_a , images=_a )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def a__ ( self ) -> Dict:
_A : Dict = self.get_image_processor()
_A : Any = self.get_tokenizer()
_A : str = self.get_qformer_tokenizer()
_A : int = InstructBlipProcessor(
tokenizer=_a , image_processor=_a , qformer_tokenizer=_a )
_A : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_A : List[str] = processor.batch_decode(_a )
_A : Optional[Any] = tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def a__ ( self ) -> str:
_A : List[Any] = self.get_image_processor()
_A : Optional[int] = self.get_tokenizer()
_A : int = self.get_qformer_tokenizer()
_A : List[Any] = InstructBlipProcessor(
tokenizer=_a , image_processor=_a , qformer_tokenizer=_a )
_A : Dict = """lower newer"""
_A : int = self.prepare_image_inputs()
_A : int = processor(text=_a , images=_a )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
| 26 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class lowercase ( UpperCamelCase__ ):
_a = "xmod"
def __init__( self , _a=3_0522 , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1e-12 , _a=1 , _a=0 , _a=2 , _a="absolute" , _a=True , _a=None , _a=False , _a=2 , _a=False , _a=True , _a=True , _a=("en_XX",) , _a=None , **_a , ) -> str:
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
_A : Tuple = vocab_size
_A : Union[str, Any] = hidden_size
_A : Dict = num_hidden_layers
_A : Dict = num_attention_heads
_A : List[Any] = hidden_act
_A : Optional[Any] = intermediate_size
_A : Any = hidden_dropout_prob
_A : str = attention_probs_dropout_prob
_A : Dict = max_position_embeddings
_A : Any = type_vocab_size
_A : List[Any] = initializer_range
_A : int = layer_norm_eps
_A : int = position_embedding_type
_A : Any = use_cache
_A : int = classifier_dropout
_A : int = pre_norm
_A : Optional[Any] = adapter_reduction_factor
_A : List[Any] = adapter_layer_norm
_A : Optional[int] = adapter_reuse_layer_norm
_A : Any = ln_before_adapter
_A : Union[str, Any] = list(_a )
_A : List[Any] = default_language
class lowercase ( UpperCamelCase__ ):
@property
def a__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_A : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_A : Dict = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 26 | 1 |
def decimal_isolate(number: float, digit_amount: int) -> float:
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.5_3, 0))
print(decimal_isolate(3_5.3_4_5, 1))
print(decimal_isolate(3_5.3_4_5, 2))
print(decimal_isolate(3_5.3_4_5, 3))
print(decimal_isolate(-1_4.7_8_9, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-1_4.1_2_3, 1))
print(decimal_isolate(-1_4.1_2_3, 2))
print(decimal_isolate(-1_4.1_2_3, 3))
| 26 |
def naive_cut_rod_recursive(n: int, prices: list):
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(
            max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )

    return max_revue


def top_down_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i

    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)

    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)

    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
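# Added illustrative example (not in the original file): with prices [1, 5, 8, 9],
# a rod of length 4 is best cut into two pieces of length 2 (revenue 5 + 5 = 10).
assert bottom_up_cut_rod(4, [1, 5, 8, 9]) == 10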
| 26 | 1 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class lowercase :
def __init__( self , _a ) -> Union[str, Any]:
_A : Tuple = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
_A : Dict = len(_a ) - 1
def a__ ( self , _a ) -> list[float]:
assert 0 <= t <= 1, "Time t must be between 0 and 1."
_A : list[float] = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , _a ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(_a ) , 5 ) == 1
return output_values
def a__ ( self , _a ) -> tuple[float, float]:
assert 0 <= t <= 1, "Time t must be between 0 and 1."
_A : Optional[int] = self.basis_function(_a )
_A : List[str] = 0.0
_A : Tuple = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def a__ ( self , _a = 0.01 ) -> str:
from matplotlib import pyplot as plt # type: ignore
_A : list[float] = [] # x coordinates of points to plot
_A : list[float] = [] # y coordinates of points to plot
_A : str = 0.0
while t <= 1:
_A : Optional[Any] = self.bezier_curve_function(_a )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
_A : Union[str, Any] = [i[0] for i in self.list_of_points]
_A : str = [i[1] for i in self.list_of_points]
plt.plot(
_a , _a , color="""blue""" , label="""Curve of Degree """ + str(self.degree ) , )
plt.scatter(_a , _a , color="""red""" , label="""Control Points""" )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 26 |
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 26 | 1 |
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def lowerCAmelCase_ ( snake_case_ ):
if "cls_token" in name:
_A : Optional[Any] = name.replace("""cls_token""","""vit.embeddings.cls_token""" )
if "mask_token" in name:
_A : int = name.replace("""mask_token""","""decoder.mask_token""" )
if "decoder_pos_embed" in name:
_A : int = name.replace("""decoder_pos_embed""","""decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
_A : Optional[int] = name.replace("""pos_embed""","""vit.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
_A : Union[str, Any] = name.replace("""patch_embed.proj""","""vit.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
_A : Tuple = name.replace("""patch_embed.norm""","""vit.embeddings.norm""" )
if "decoder_blocks" in name:
_A : Any = name.replace("""decoder_blocks""","""decoder.decoder_layers""" )
if "blocks" in name:
_A : int = name.replace("""blocks""","""vit.encoder.layer""" )
if "attn.proj" in name:
_A : Any = name.replace("""attn.proj""","""attention.output.dense""" )
if "attn" in name:
_A : Any = name.replace("""attn""","""attention.self""" )
if "norm1" in name:
_A : Optional[int] = name.replace("""norm1""","""layernorm_before""" )
if "norm2" in name:
_A : Dict = name.replace("""norm2""","""layernorm_after""" )
if "mlp.fc1" in name:
_A : Dict = name.replace("""mlp.fc1""","""intermediate.dense""" )
if "mlp.fc2" in name:
_A : Union[str, Any] = name.replace("""mlp.fc2""","""output.dense""" )
if "decoder_embed" in name:
_A : Optional[Any] = name.replace("""decoder_embed""","""decoder.decoder_embed""" )
if "decoder_norm" in name:
_A : Optional[Any] = name.replace("""decoder_norm""","""decoder.decoder_norm""" )
if "decoder_pred" in name:
_A : Optional[Any] = name.replace("""decoder_pred""","""decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name:
_A : Any = name.replace("""norm.weight""","""vit.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name:
_A : Optional[Any] = name.replace("""norm.bias""","""vit.layernorm.bias""" )
return name
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
for key in orig_state_dict.copy().keys():
_A : Dict = orig_state_dict.pop(snake_case_ )
if "qkv" in key:
_A : Any = key.split(""".""" )
_A : Optional[Any] = int(key_split[1] )
if "decoder_blocks" in key:
_A : int = config.decoder_hidden_size
_A : Tuple = """decoder.decoder_layers."""
if "weight" in key:
_A : Tuple = val[:dim, :]
_A : str = val[dim : dim * 2, :]
_A : int = val[-dim:, :]
elif "bias" in key:
_A : Optional[Any] = val[:dim]
_A : List[Any] = val[dim : dim * 2]
_A : List[Any] = val[-dim:]
else:
_A : Optional[Any] = config.hidden_size
_A : Optional[int] = """vit.encoder.layer."""
if "weight" in key:
_A : str = val[:dim, :]
_A : Any = val[dim : dim * 2, :]
_A : Union[str, Any] = val[-dim:, :]
elif "bias" in key:
_A : Union[str, Any] = val[:dim]
_A : Any = val[dim : dim * 2]
_A : int = val[-dim:]
else:
_A : Optional[Any] = val
return orig_state_dict
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Optional[Any] = ViTMAEConfig()
if "large" in checkpoint_url:
_A : List[str] = 1024
_A : Any = 4096
_A : List[Any] = 24
_A : Union[str, Any] = 16
elif "huge" in checkpoint_url:
_A : Optional[int] = 14
_A : List[str] = 1280
_A : Any = 5120
_A : Union[str, Any] = 32
_A : Optional[Any] = 16
_A : Union[str, Any] = ViTMAEForPreTraining(snake_case_ )
_A : Dict = torch.hub.load_state_dict_from_url(snake_case_,map_location="""cpu""" )["""model"""]
_A : Dict = ViTMAEImageProcessor(size=config.image_size )
_A : Union[str, Any] = convert_state_dict(snake_case_,snake_case_ )
model.load_state_dict(snake_case_ )
model.eval()
_A : Any = """https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"""
_A : List[str] = Image.open(requests.get(snake_case_,stream=snake_case_ ).raw )
_A : str = ViTMAEImageProcessor(size=config.image_size )
_A : Union[str, Any] = image_processor(images=snake_case_,return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
_A : str = model(**snake_case_ )
_A : Optional[int] = outputs.logits
if "large" in checkpoint_url:
_A : int = torch.tensor(
[[-0.73_09, -0.71_28, -1.01_69], [-1.01_61, -0.90_58, -1.18_78], [-1.04_78, -0.94_11, -1.19_11]] )
elif "huge" in checkpoint_url:
_A : List[str] = torch.tensor(
[[-1.15_99, -0.91_99, -1.22_21], [-1.19_52, -0.92_69, -1.23_07], [-1.21_43, -0.93_37, -1.22_62]] )
else:
_A : List[str] = torch.tensor(
[[-0.91_92, -0.84_81, -1.12_59], [-1.13_49, -1.00_34, -1.25_99], [-1.17_57, -1.04_29, -1.27_26]] )
# verify logits
assert torch.allclose(logits[0, :3, :3],snake_case_,atol=1e-4 )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case_ )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(snake_case_ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_snake_case = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 26 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class lowercase ( unittest.TestCase ):
_a = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_a = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def a__ ( self , _a , _a , _a ) -> int:
_A : str = TextaTextGenerationPipeline(model=_a , tokenizer=_a )
return generator, ["Something to write", "Something else"]
def a__ ( self , _a , _a ) -> Dict:
_A : Any = generator("""Something there""" )
self.assertEqual(_a , [{"""generated_text""": ANY(_a )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
_A : List[Any] = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_a )
self.assertEqual(
_a , [
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] , )
_A : Optional[int] = generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=_a )
self.assertEqual(
_a , [
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] , )
with self.assertRaises(_a ):
generator(4 )
@require_torch
def a__ ( self ) -> List[str]:
_A : Any = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" )
# do_sample=False necessary for reproducibility
_A : Dict = generator("""Something there""" , do_sample=_a )
self.assertEqual(_a , [{"""generated_text""": """"""}] )
_A : Any = 3
_A : Any = generator(
"""Something there""" , num_return_sequences=_a , num_beams=_a , )
_A : Optional[int] = [
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """"""},
]
self.assertEqual(_a , _a )
_A : Dict = generator("""This is a test""" , do_sample=_a , num_return_sequences=2 , return_tensors=_a )
self.assertEqual(
_a , [
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
] , )
_A : Dict = generator.model.config.eos_token_id
_A : List[str] = """<pad>"""
_A : Dict = generator(
["""This is a test""", """This is a second test"""] , do_sample=_a , num_return_sequences=2 , batch_size=2 , return_tensors=_a , )
self.assertEqual(
_a , [
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
] , )
@require_tf
def a__ ( self ) -> int:
_A : Optional[Any] = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" )
# do_sample=False necessary for reproducibility
_A : str = generator("""Something there""" , do_sample=_a )
self.assertEqual(_a , [{"""generated_text""": """"""}] )
| 26 | 1 |
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
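# Added illustrative usage: 5 litres expressed in cubic metres (~0.005),
# compared with a tolerance to sidestep floating-point rounding.
assert abs(volume_conversion(5, "litre", "cubicmeter") - 0.005) < 1e-12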
| 26 |
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
def lowerCAmelCase_ ( ):
print(f'''euclidean_gcd(3, 5) = {euclidean_gcd(3,5 )}''' )
print(f'''euclidean_gcd(5, 3) = {euclidean_gcd(5,3 )}''' )
print(f'''euclidean_gcd(1, 3) = {euclidean_gcd(1,3 )}''' )
print(f'''euclidean_gcd(3, 6) = {euclidean_gcd(3,6 )}''' )
print(f'''euclidean_gcd(6, 3) = {euclidean_gcd(6,3 )}''' )
print(f'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3,5 )}''' )
print(f'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5,3 )}''' )
print(f'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1,3 )}''' )
print(f'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3,6 )}''' )
print(f'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6,3 )}''' )
if __name__ == "__main__":
main()
| 26 | 1 |
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    # Dummy placeholder that raises if the optional note_seq backend is missing.
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
def lowerCAmelCase_ ( number ):
if number < 0:
raise ValueError("""number must not be negative""" )
return number & (number - 1) == 0
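# Example usage: a power of two has exactly one bit set, so ``number & (number - 1)``
# clears that bit and leaves zero only for powers of two (and for zero itself).
assert lowerCAmelCase_(64) is True
assert lowerCAmelCase_(18) is False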
if __name__ == "__main__":
import doctest
doctest.testmod()
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> Tuple:
_A : str = 0
@slow
def a__ ( self ) -> Tuple:
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
_A : Tuple = AutoTokenizer.from_pretrained(_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(_a ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
_A : Optional[Any] = AutoTokenizer.from_pretrained(_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(_a ) , 0 )
def a__ ( self ) -> List[str]:
_A : str = AutoTokenizer.from_pretrained(_a )
self.assertIsInstance(_a , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def a__ ( self ) -> Tuple:
_A : str = AutoTokenizer.from_pretrained(_a )
self.assertIsInstance(_a , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def a__ ( self ) -> str:
_A : int = AutoConfig.from_pretrained(_a )
self.assertIsInstance(_a , _a )
# Check that tokenizer_type ≠ model_type
_A : int = AutoTokenizer.from_pretrained(_a , config=_a )
self.assertIsInstance(_a , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def a__ ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(_a , """vocab.txt""" ) )
_A : Dict = AutoTokenizer.from_pretrained(_a , tokenizer_type="""bert""" , use_fast=_a )
self.assertIsInstance(_a , _a )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(_a , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(_a , """merges.txt""" ) )
_A : str = AutoTokenizer.from_pretrained(_a , tokenizer_type="""gpt2""" , use_fast=_a )
self.assertIsInstance(_a , _a )
@require_tokenizers
def a__ ( self ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(_a , """vocab.txt""" ) )
_A : int = AutoTokenizer.from_pretrained(_a , tokenizer_type="""bert""" )
self.assertIsInstance(_a , _a )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(_a , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(_a , """merges.txt""" ) )
_A : Dict = AutoTokenizer.from_pretrained(_a , tokenizer_type="""gpt2""" )
self.assertIsInstance(_a , _a )
def a__ ( self ) -> Tuple:
with pytest.raises(_a ):
AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
@require_tokenizers
def a__ ( self ) -> List[str]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
_A : List[str] = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(_a , (BertTokenizer, BertTokenizerFast) )
if isinstance(_a , _a ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _a )
else:
self.assertEqual(tokenizer.do_lower_case , _a )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def a__ ( self ) -> Union[str, Any]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_a , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
_A : Dict = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def a__ ( self ) -> int:
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
_A : Dict = TOKENIZER_MAPPING.values()
_A : Optional[Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_a )
@require_tokenizers
def a__ ( self ) -> str:
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=_a ) , _a )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , _a )
@require_tokenizers
def a__ ( self ) -> Union[str, Any]:
_A : Optional[Any] = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=_a )
_A : Optional[Any] = """Hello, world. How are you?"""
_A : List[Any] = tokenizer.tokenize(_a )
self.assertEqual("""[UNK]""" , tokens[0] )
_A : str = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=_a )
_A : Tuple = tokenizer.tokenize(_a )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def a__ ( self ) -> Any:
_A : Optional[int] = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(_a ) , _a )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 3_0000 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def a__ ( self ) -> Tuple:
_A : Optional[int] = AutoTokenizer.from_pretrained(_a )
self.assertIsInstance(_a , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_a )
_A : List[str] = AutoTokenizer.from_pretrained(_a )
self.assertIsInstance(_a , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def a__ ( self ) -> Dict:
_A : Tuple = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(_a , _a )
def a__ ( self ) -> Union[str, Any]:
# Check we can load the tokenizer config of an online model.
_A : List[str] = get_tokenizer_config("""bert-base-cased""" )
_A : Any = config.pop("""_commit_hash""" , _a )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(_a , {"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
_A : Optional[int] = get_tokenizer_config(_a )
self.assertDictEqual(_a , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
_A : Union[str, Any] = AutoTokenizer.from_pretrained(_a )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_a )
_A : List[Any] = get_tokenizer_config(_a )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
def a__ ( self ) -> Optional[int]:
try:
AutoConfig.register("""custom""" , _a )
AutoTokenizer.register(_a , slow_tokenizer_class=_a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoTokenizer.register(_a , slow_tokenizer_class=_a )
_A : List[Any] = CustomTokenizer.from_pretrained(_a )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_a )
_A : int = AutoTokenizer.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def a__ ( self ) -> str:
try:
AutoConfig.register("""custom""" , _a )
# Can register in two steps
AutoTokenizer.register(_a , slow_tokenizer_class=_a )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(_a , fast_tokenizer_class=_a )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
_a , slow_tokenizer_class=_a , fast_tokenizer_class=_a )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoTokenizer.register(_a , fast_tokenizer_class=_a )
            # We pass through a bert tokenizer fast because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
_A : str = BertTokenizerFast.from_pretrained(_a )
bert_tokenizer.save_pretrained(_a )
_A : Optional[Any] = CustomTokenizerFast.from_pretrained(_a )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_a )
_A : Any = AutoTokenizer.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_A : Dict = AutoTokenizer.from_pretrained(_a , use_fast=_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def a__ ( self ) -> List[str]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_a ):
_A : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
_A : int = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=_a )
_A : Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=_a )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_a )
_A : int = AutoTokenizer.from_pretrained(_a , trust_remote_code=_a )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
_A : str = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=_a , use_fast=_a )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_a )
_A : List[Any] = AutoTokenizer.from_pretrained(_a , trust_remote_code=_a , use_fast=_a )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
@require_tokenizers
def a__ ( self ) -> int:
class lowercase ( UpperCamelCase__ ):
_a = False
class lowercase ( UpperCamelCase__ ):
_a = NewTokenizer
_a = False
try:
AutoConfig.register("""custom""" , _a )
AutoTokenizer.register(_a , slow_tokenizer_class=_a )
AutoTokenizer.register(_a , fast_tokenizer_class=_a )
# If remote code is not set, the default is to use local
_A : int = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
_A : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=_a )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
_A : Union[str, Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=_a )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
_A : Union[str, Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=_a , use_fast=_a )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
_A : str = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=_a )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
_A : List[Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=_a , use_fast=_a )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def a__ ( self ) -> List[Any]:
_A : List[Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=_a )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
_A : str = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=_a , use_fast=_a )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def a__ ( self ) -> Tuple:
with self.assertRaisesRegex(
_a , """bert-base is not a local folder and is not a valid model identifier""" ):
_A : List[str] = AutoTokenizer.from_pretrained("""bert-base""" )
def a__ ( self ) -> str:
with self.assertRaisesRegex(
_a , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
_A : Union[str, Any] = AutoTokenizer.from_pretrained(_a , revision="""aaaaaa""" )
def a__ ( self ) -> str:
# Make sure we have cached the tokenizer.
_A : Any = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
_A : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key ( k ):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name,hf_name )
    if k.startswith("""encoder""" ):
        k = k.replace(""".attn""",""".self_attn""" )
        k = k.replace("""norm1""","""self_attn_layer_norm""" )
        k = k.replace("""norm2""","""final_layer_norm""" )
    elif k.startswith("""decoder""" ):
        k = k.replace("""norm1""","""self_attn_layer_norm""" )
        k = k.replace("""norm2""","""encoder_attn_layer_norm""" )
        k = k.replace("""norm3""","""final_layer_norm""" )
return k
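# Illustration of the mapping above on two sample keys (the encoder key is only an example,
# not necessarily a real ParlAI parameter name): "attention" -> "attn" -> ".self_attn" and
# "q_lin" -> "q_proj".
assert rename_state_dict_key("embeddings.weight") == "shared.weight"
assert rename_state_dict_key("encoder.layers.0.attention.q_lin.weight") == "encoder.layers.0.self_attn.q_proj.weight"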
def rename_layernorm_keys ( sd ):
    keys = [
        """model.encoder.layernorm_embedding.weight""",
        """model.encoder.layernorm_embedding.bias""",
        """model.decoder.layernorm_embedding.weight""",
        """model.decoder.layernorm_embedding.bias""",
    ]
    for k in keys:
        v = sd.pop(k )
        new_k = k.replace("""layernorm_embedding""","""layer_norm""" )
        assert new_k not in sd
        sd[new_k] = v
_snake_case = ["START"]
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Tuple = torch.load(snake_case_,map_location="""cpu""" )
_A : List[Any] = model["""model"""]
_A : Optional[Any] = BlenderbotConfig.from_json_file(snake_case_ )
_A : List[str] = BlenderbotForConditionalGeneration(snake_case_ )
_A : Tuple = m.model.state_dict().keys()
_A : Any = []
_A : Dict = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
_A : Optional[int] = rename_state_dict_key(snake_case_ )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
_A : Dict = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(snake_case_ )
m.model.load_state_dict(snake_case_,strict=snake_case_ )
m.half()
m.save_pretrained(snake_case_ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
_snake_case = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
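# Example invocation (illustrative; the script filename and paths are placeholders):
#   python convert_blenderbot_checkpoint.py --src_path ./blenderbot-model.bin \
#       --save_dir ./hf_blenderbot --hf_config_json ./blenderbot-3b-config.json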
_snake_case = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
_snake_case = [{"type": "code", "content": INSTALL_CONTENT}]
_snake_case = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a , _a , _a = None , _a = None , _a = False , **_a , ) -> int:
super().__init__(features=_a , cache_dir=_a , keep_in_memory=_a , **_a )
_A : Optional[int] = Sql(
cache_dir=_a , features=_a , sql=_a , con=_a , **_a , )
def a__ ( self ) -> Optional[Any]:
_A : Tuple = None
_A : int = None
_A : Tuple = None
_A : Union[str, Any] = None
self.builder.download_and_prepare(
download_config=_a , download_mode=_a , verification_mode=_a , base_path=_a , )
# Build dataset for splits
_A : int = self.builder.as_dataset(
split="""train""" , verification_mode=_a , in_memory=self.keep_in_memory )
return dataset
class lowercase :
def __init__( self , _a , _a , _a , _a = None , _a = None , **_a , ) -> Union[str, Any]:
if num_proc is not None and num_proc <= 0:
raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''' )
_A : Dict = dataset
_A : int = name
_A : Union[str, Any] = con
_A : str = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
_A : str = num_proc
_A : Optional[Any] = to_sql_kwargs
def a__ ( self ) -> int:
_A : Any = self.to_sql_kwargs.pop("""sql""" , _a )
_A : List[str] = self.to_sql_kwargs.pop("""con""" , _a )
_A : int = self.to_sql_kwargs.pop("""index""" , _a )
_A : List[str] = self._write(index=_a , **self.to_sql_kwargs )
return written
def a__ ( self , _a ) -> Optional[int]:
_A , _A , _A : List[str] = args
_A : int = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs
_A : str = query_table(
table=self.dataset.data , key=slice(_a , offset + self.batch_size ) , indices=self.dataset._indices , )
_A : Tuple = batch.to_pandas()
_A : Union[str, Any] = df.to_sql(self.name , self.con , index=_a , **_a )
return num_rows or len(_a )
def a__ ( self , _a , **_a ) -> int:
_A : Any = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
_A , _A : Tuple = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _a , _a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
written += num_rows
return written
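# Usage sketch (illustrative; assumes the public `datasets` API that wraps the reader and
# writer classes defined above):
#   from datasets import Dataset
#   ds = Dataset.from_dict({"text": ["a", "b"]})
#   ds.to_sql("my_table", "sqlite:///data.db")                             # writer path
#   ds2 = Dataset.from_sql("SELECT * FROM my_table", "sqlite:///data.db")  # reader path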
def alternative_string_arrange ( first_str,second_str ):
    first_str_length : int = len(first_str )
    second_str_length : int = len(second_str )
    abs_length : int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list : list = []
    for char_count in range(abs_length ):
        if char_count < first_str_length:
            output_list.append(first_str[char_count] )
        if char_count < second_str_length:
            output_list.append(second_str[char_count] )
    return "".join(output_list )
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class lowercase ( UpperCamelCase__ ):
_a = "fnet"
def __init__( self , _a=3_2000 , _a=768 , _a=12 , _a=3072 , _a="gelu_new" , _a=0.1 , _a=512 , _a=4 , _a=0.02 , _a=1e-12 , _a=False , _a=512 , _a=3 , _a=1 , _a=2 , **_a , ) -> int:
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
_A : Any = vocab_size
_A : str = max_position_embeddings
_A : Optional[Any] = hidden_size
_A : List[str] = num_hidden_layers
_A : List[str] = intermediate_size
_A : List[Any] = hidden_act
_A : List[str] = hidden_dropout_prob
_A : List[str] = initializer_range
_A : List[Any] = type_vocab_size
_A : List[Any] = layer_norm_eps
_A : List[str] = use_tpu_fourier_optimizations
_A : str = tpu_short_seq_length
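# Usage sketch (illustrative; assumes the configuration class above is exported as FNetConfig):
#   from transformers import FNetConfig, FNetModel
#   config = FNetConfig(vocab_size=32000, use_tpu_fourier_optimizations=False)
#   model = FNetModel(config)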
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase ( UpperCamelCase__ ):
_a = ["image_processor", "tokenizer"]
_a = "BlipImageProcessor"
_a = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , _a , _a ) -> int:
_A : Union[str, Any] = False
super().__init__(_a , _a )
_A : int = self.image_processor
def __call__( self , _a = None , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ) -> BatchEncoding:
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
_A : Dict = self.tokenizer
_A : Optional[int] = self.tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_token_type_ids=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
return text_encoding
# add pixel_values
_A : Any = self.image_processor(_a , return_tensors=_a )
if text is not None:
_A : List[str] = self.tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_token_type_ids=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
else:
_A : Dict = None
if text_encoding is not None:
encoding_image_processor.update(_a )
return encoding_image_processor
def a__ ( self , *_a , **_a ) -> int:
return self.tokenizer.batch_decode(*_a , **_a )
def a__ ( self , *_a , **_a ) -> Optional[int]:
return self.tokenizer.decode(*_a , **_a )
@property
def a__ ( self ) -> int:
_A : Dict = self.tokenizer.model_input_names
_A : Tuple = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
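# Usage sketch (illustrative; assumes the processor above is exported as BlipProcessor and
# the checkpoint name is only an example):
#   from transformers import BlipProcessor
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=image, text="a photo of", return_tensors="pt")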
def harmonic_series ( n_term ):
    if n_term == "":
        return []
    series : list = []
    for temp in range(int(n_term ) ):
        series.append(f'''1/{temp + 1}''' if series else """1""" )
    return series
if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
print(harmonic_series(nth_term))
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast ( tokenizer_name,checkpoint_name,dump_path,force_download ):
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(f'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.''' )
if tokenizer_name is None:
_A : List[str] = TOKENIZER_CLASSES
else:
_A : Union[str, Any] = {tokenizer_name: getattr(snake_case_,tokenizer_name + """Fast""" )}
logger.info(f'''Loading tokenizer classes: {tokenizer_names}''' )
for tokenizer_name in tokenizer_names:
_A : Tuple = TOKENIZER_CLASSES[tokenizer_name]
_A : Optional[int] = True
if checkpoint_name is None:
_A : int = list(tokenizer_class.max_model_input_sizes.keys() )
else:
_A : Dict = [checkpoint_name]
logger.info(f'''For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}''' )
for checkpoint in checkpoint_names:
logger.info(f'''Loading {tokenizer_class.__class__.__name__} {checkpoint}''' )
# Load tokenizer
_A : List[Any] = tokenizer_class.from_pretrained(snake_case_,force_download=snake_case_ )
# Save fast tokenizer
logger.info(f'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''' )
# For organization names we create sub-directories
if "/" in checkpoint:
_A , _A : Any = checkpoint.split("""/""" )
_A : Optional[Any] = os.path.join(snake_case_,snake_case_ )
elif add_prefix:
_A : Optional[int] = checkpoint
_A : List[str] = dump_path
else:
_A : List[Any] = None
_A : int = dump_path
logger.info(f'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
_A : Tuple = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
_A : List[Any] = file_path.split(snake_case_ )[-1][0]
if next_char == "/":
_A : Optional[Any] = os.path.join(snake_case_,snake_case_ )
_A : List[str] = None
logger.info(f'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
_A : Optional[Any] = tokenizer.save_pretrained(
snake_case_,legacy_format=snake_case_,filename_prefix=snake_case_ )
logger.info(f'''=> File names {file_names}''' )
for file_name in file_names:
if not file_name.endswith("""tokenizer.json""" ):
os.remove(snake_case_ )
logger.info(f'''=> removing {file_name}''' )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
_snake_case = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
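# Example invocation (illustrative; the script filename is a placeholder):
#   python convert_slow_tokenizers_to_fast.py --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased --dump_path ./fast_tokenizers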
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
_snake_case = logging.get_logger(__name__)
_snake_case = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
_snake_case = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def lowerCAmelCase_ ( snake_case_ ):
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
_A : List[str] = model_type_to_module_name(snake_case_ )
_A : List[Any] = importlib.import_module(f'''.{module_name}''',"""transformers.models""" )
try:
return getattr(snake_case_,snake_case_ )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(snake_case_,"""__name__""",snake_case_ ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
_A : List[Any] = importlib.import_module("""transformers""" )
if hasattr(snake_case_,snake_case_ ):
return getattr(snake_case_,snake_case_ )
return None
def lowerCAmelCase_ ( snake_case_,snake_case_ = None,snake_case_ = False,snake_case_ = False,snake_case_ = None,snake_case_ = None,snake_case_ = None,snake_case_ = False,**snake_case_,):
_A : Optional[int] = get_file_from_repo(
snake_case_,snake_case_,cache_dir=snake_case_,force_download=snake_case_,resume_download=snake_case_,proxies=snake_case_,use_auth_token=snake_case_,revision=snake_case_,local_files_only=snake_case_,)
if resolved_config_file is None:
logger.info(
"""Could not locate the feature extractor configuration file, will try to use the model config instead.""" )
return {}
with open(snake_case_,encoding="""utf-8""" ) as reader:
return json.load(snake_case_ )
class AutoFeatureExtractor :
def __init__( self ) -> List[Any]:
raise EnvironmentError(
"""AutoFeatureExtractor is designed to be instantiated """
"""using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" )
@classmethod
@replace_list_option_in_docstrings(_a )
def a__ ( cls , _a , **_a ) -> Any:
_A : Tuple = kwargs.pop("""config""" , _a )
_A : Tuple = kwargs.pop("""trust_remote_code""" , _a )
_A : List[Any] = True
_A , _A : Tuple = FeatureExtractionMixin.get_feature_extractor_dict(_a , **_a )
_A : Tuple = config_dict.get("""feature_extractor_type""" , _a )
_A : int = None
if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ):
_A : Optional[int] = config_dict["""auto_map"""]["""AutoFeatureExtractor"""]
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(_a , _a ):
_A : int = AutoConfig.from_pretrained(_a , **_a )
            # It could be in `config.feature_extractor_type`
_A : Optional[int] = getattr(_a , """feature_extractor_type""" , _a )
if hasattr(_a , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map:
_A : Tuple = config.auto_map["""AutoFeatureExtractor"""]
if feature_extractor_class is not None:
_A : Optional[Any] = feature_extractor_class_from_name(_a )
_A : List[Any] = feature_extractor_auto_map is not None
_A : Union[str, Any] = feature_extractor_class is not None or type(_a ) in FEATURE_EXTRACTOR_MAPPING
_A : Optional[int] = resolve_trust_remote_code(
_a , _a , _a , _a )
if has_remote_code and trust_remote_code:
_A : Dict = get_class_from_dynamic_module(
_a , _a , **_a )
_A : str = kwargs.pop("""code_revision""" , _a )
if os.path.isdir(_a ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(_a , **_a )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(_a , **_a )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(_a ) in FEATURE_EXTRACTOR_MAPPING:
_A : Dict = FEATURE_EXTRACTOR_MAPPING[type(_a )]
return feature_extractor_class.from_dict(_a , **_a )
raise ValueError(
F'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '''
F'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '''
F'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def a__ ( _a , _a ) -> Optional[int]:
FEATURE_EXTRACTOR_MAPPING.register(_a , _a )
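# Usage sketch (illustrative; the checkpoint name is only an example):
#   from transformers import AutoFeatureExtractor
#   extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#   features = extractor(raw_audio, sampling_rate=16000, return_tensors="pt")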
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_snake_case = 16
_snake_case = 32
def bamb ( x ):
return int(x / 2**20 )
class TorchTracemalloc :
def __enter__( self ) -> Any:
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
_A : Any = torch.cuda.memory_allocated()
return self
def __exit__( self , *_a ) -> List[str]:
gc.collect()
torch.cuda.empty_cache()
_A : List[Any] = torch.cuda.memory_allocated()
_A : Dict = torch.cuda.max_memory_allocated()
_A : str = bamb(self.end - self.begin )
_A : Any = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders ( accelerator,batch_size = 16,model_name_or_path = "bert-base-cased",n_train = 320,n_val = 160,):
_A : List[Any] = AutoTokenizer.from_pretrained(snake_case_ )
_A : List[str] = load_dataset(
"""glue""","""mrpc""",split={"""train""": f'''train[:{n_train}]''', """validation""": f'''validation[:{n_val}]'''} )
def tokenize_function(snake_case_ ):
# max_length=None => use the model max length (it's actually the default)
_A : Union[str, Any] = tokenizer(examples["""sentence1"""],examples["""sentence2"""],truncation=snake_case_,max_length=snake_case_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_A : List[Any] = datasets.map(
snake_case_,batched=snake_case_,remove_columns=["""idx""", """sentence1""", """sentence2"""],load_from_cache_file=snake_case_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_A : List[Any] = tokenized_datasets.rename_column("""label""","""labels""" )
def collate_fn(snake_case_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(snake_case_,padding="""max_length""",max_length=128,return_tensors="""pt""" )
return tokenizer.pad(snake_case_,padding="""longest""",return_tensors="""pt""" )
# Instantiate dataloaders.
_A : Dict = DataLoader(
tokenized_datasets["""train"""],shuffle=snake_case_,collate_fn=snake_case_,batch_size=snake_case_ )
_A : int = DataLoader(
tokenized_datasets["""validation"""],shuffle=snake_case_,collate_fn=snake_case_,batch_size=snake_case_ )
return train_dataloader, eval_dataloader
def training_function ( config,args ):
# Initialize accelerator
_A : Tuple = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_A : Optional[int] = config["""lr"""]
_A : Tuple = int(config["""num_epochs"""] )
_A : Any = int(config["""seed"""] )
_A : Tuple = int(config["""batch_size"""] )
_A : Any = args.model_name_or_path
set_seed(snake_case_ )
_A , _A : Union[str, Any] = get_dataloaders(snake_case_,snake_case_,snake_case_,args.n_train,args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_A : Optional[int] = AutoModelForSequenceClassification.from_pretrained(snake_case_,return_dict=snake_case_ )
# Instantiate optimizer
_A : Optional[Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_A : Tuple = optimizer_cls(params=model.parameters(),lr=snake_case_ )
if accelerator.state.deepspeed_plugin is not None:
_A : Optional[int] = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
_A : int = 1
_A : str = (len(snake_case_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_A : str = get_linear_schedule_with_warmup(
optimizer=snake_case_,num_warmup_steps=0,num_training_steps=snake_case_,)
else:
_A : Union[str, Any] = DummyScheduler(snake_case_,total_num_steps=snake_case_,warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_A , _A , _A , _A , _A : Dict = accelerator.prepare(
snake_case_,snake_case_,snake_case_,snake_case_,snake_case_ )
# We need to keep track of how many total steps we have iterated over
_A : Union[str, Any] = 0
# We also need to keep track of the stating epoch so files are named properly
_A : Optional[Any] = 0
# Now we train the model
_A : Optional[Any] = {}
for epoch in range(snake_case_,snake_case_ ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(snake_case_ ):
_A : Dict = model(**snake_case_ )
_A : Dict = outputs.loss
_A : Dict = loss / gradient_accumulation_steps
accelerator.backward(snake_case_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print("""Memory before entering the train : {}""".format(bamb(tracemalloc.begin ) ) )
accelerator.print("""Memory consumed at the end of the train (end-begin): {}""".format(tracemalloc.used ) )
accelerator.print("""Peak Memory consumed during the train (max-begin): {}""".format(tracemalloc.peaked ) )
accelerator.print(
"""Total Peak Memory consumed during the train (max): {}""".format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
_A : List[str] = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[f'''epoch-{epoch}'''] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir,"""peak_memory_utilization.json""" ),"""w""" ) as f:
json.dump(snake_case_,snake_case_ )
def main ( ):
_A : Optional[Any] = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""",type=snake_case_,default="""bert-base-cased""",help="""Path to pretrained model or model identifier from huggingface.co/models.""",required=snake_case_,)
parser.add_argument(
"""--output_dir""",type=snake_case_,default=""".""",help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""",)
parser.add_argument(
"""--peak_memory_upper_bound""",type=snake_case_,default=snake_case_,help="""The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.""",)
parser.add_argument(
"""--n_train""",type=snake_case_,default=320,help="""Number of training examples to use.""",)
parser.add_argument(
"""--n_val""",type=snake_case_,default=160,help="""Number of validation examples to use.""",)
parser.add_argument(
"""--num_epochs""",type=snake_case_,default=1,help="""Number of train epochs.""",)
_A : Optional[int] = parser.parse_args()
_A : Optional[int] = {"""lr""": 2e-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(snake_case_,snake_case_ )
if __name__ == "__main__":
main()
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=False , _a=True , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , ) -> Dict:
_A : str = parent
_A : int = batch_size
_A : Optional[int] = num_channels
_A : List[Any] = image_size
_A : int = min_resolution
_A : Optional[int] = max_resolution
_A : Any = do_resize
_A : List[str] = size if size is not None else {"""height""": 18, """width""": 20}
_A : Optional[int] = do_thumbnail
_A : str = do_align_axis
_A : List[Any] = do_pad
_A : Optional[Any] = do_normalize
_A : Tuple = image_mean
_A : List[str] = image_std
def a__ ( self ) -> Optional[int]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = DonutImageProcessor if is_vision_available() else None
def a__ ( self ) -> Optional[int]:
_A : List[str] = DonutImageProcessingTester(self )
@property
def a__ ( self ) -> List[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> Optional[Any]:
_A : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , """do_resize""" ) )
self.assertTrue(hasattr(_a , """size""" ) )
self.assertTrue(hasattr(_a , """do_thumbnail""" ) )
self.assertTrue(hasattr(_a , """do_align_long_axis""" ) )
self.assertTrue(hasattr(_a , """do_pad""" ) )
self.assertTrue(hasattr(_a , """do_normalize""" ) )
self.assertTrue(hasattr(_a , """image_mean""" ) )
self.assertTrue(hasattr(_a , """image_std""" ) )
def a__ ( self ) -> List[Any]:
_A : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 20} )
_A : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
# Previous config had dimensions in (width, height) order
_A : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {"""height""": 84, """width""": 42} )
def a__ ( self ) -> Union[str, Any]:
pass
@is_flaky()
def a__ ( self ) -> Optional[int]:
# Initialize image_processing
_A : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : Any = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
@is_flaky()
def a__ ( self ) -> Dict:
# Initialize image_processing
_A : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
_A : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : List[str] = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
@is_flaky()
def a__ ( self ) -> Optional[int]:
# Initialize image_processing
_A : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_A : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : str = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=3 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=1 / 255 , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , _a=True , ) -> Dict:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_A : List[str] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
_A : int = parent
_A : Optional[Any] = batch_size
_A : List[str] = num_channels
_A : Dict = min_resolution
_A : Union[str, Any] = max_resolution
_A : Optional[Any] = do_resize
_A : Optional[int] = size
_A : Optional[Any] = do_rescale
_A : Optional[Any] = rescale_factor
_A : Optional[int] = do_normalize
_A : List[str] = image_mean
_A : Optional[Any] = image_std
_A : Union[str, Any] = do_pad
def a__ ( self ) -> Dict:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def a__ ( self , _a , _a=False ) -> str:
if not batched:
_A : Optional[Any] = image_inputs[0]
if isinstance(_a , Image.Image ):
_A , _A : Union[str, Any] = image.size
else:
_A , _A : int = image.shape[1], image.shape[2]
if w < h:
_A : Optional[int] = int(self.size["""shortest_edge"""] * h / w )
_A : Optional[Any] = self.size["""shortest_edge"""]
elif w > h:
_A : List[Any] = self.size["""shortest_edge"""]
_A : List[str] = int(self.size["""shortest_edge"""] * w / h )
else:
_A : List[Any] = self.size["""shortest_edge"""]
_A : Any = self.size["""shortest_edge"""]
else:
_A : Optional[int] = []
for image in image_inputs:
_A , _A : Any = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_A : List[Any] = max(_a , key=lambda _a : item[0] )[0]
_A : Tuple = max(_a , key=lambda _a : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = DetrImageProcessor if is_vision_available() else None
def a__ ( self ) -> List[str]:
_A : List[str] = DetrImageProcessingTester(self )
@property
def a__ ( self ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> Optional[Any]:
_A : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , """image_mean""" ) )
self.assertTrue(hasattr(_a , """image_std""" ) )
self.assertTrue(hasattr(_a , """do_normalize""" ) )
self.assertTrue(hasattr(_a , """do_rescale""" ) )
self.assertTrue(hasattr(_a , """rescale_factor""" ) )
self.assertTrue(hasattr(_a , """do_resize""" ) )
self.assertTrue(hasattr(_a , """size""" ) )
self.assertTrue(hasattr(_a , """do_pad""" ) )
def a__ ( self ) -> str:
_A : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , _a )
_A : Any = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_a )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , _a )
def a__ ( self ) -> Optional[int]:
pass
def a__ ( self ) -> int:
# Initialize image_processing
_A : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_A : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
_A , _A : int = self.image_processor_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A , _A : List[Any] = self.image_processor_tester.get_expected_values(_a , batched=_a )
_A : Dict = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self ) -> List[Any]:
# Initialize image_processing
_A : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
_A : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
_A , _A : Optional[int] = self.image_processor_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A : Union[str, Any] = image_processing(_a , return_tensors="""pt""" ).pixel_values
_A , _A : Dict = self.image_processor_tester.get_expected_values(_a , batched=_a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self ) -> Optional[int]:
# Initialize image_processing
_A : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_A : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
_A , _A : str = self.image_processor_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A : Tuple = image_processing(_a , return_tensors="""pt""" ).pixel_values
_A , _A : List[str] = self.image_processor_tester.get_expected_values(_a , batched=_a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def a__ ( self ) -> Tuple:
# prepare image and target
_A : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
_A : Union[str, Any] = json.loads(f.read() )
_A : List[str] = {"""image_id""": 3_9769, """annotations""": target}
# encode them
_A : List[Any] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" )
_A : Any = image_processing(images=_a , annotations=_a , return_tensors="""pt""" )
# verify pixel values
_A : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , _a )
_A : str = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , _a , atol=1e-4 ) )
# verify area
_A : Dict = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , _a ) )
# verify boxes
_A : str = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , _a )
_A : List[Any] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , _a , atol=1e-3 ) )
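        # Note (illustrative): the expected boxes above are in normalized
        # (center_x, center_y, width, height) format, which is why every coordinate lies in [0, 1].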
# verify image_id
_A : Tuple = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , _a ) )
# verify is_crowd
_A : str = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , _a ) )
# verify class_labels
_A : int = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , _a ) )
# verify orig_size
_A : List[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , _a ) )
# verify size
_A : Optional[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , _a ) )
@slow
def a__ ( self ) -> Optional[int]:
# prepare image, target and masks_path
_A : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
_A : Any = json.loads(f.read() )
_A : Optional[Any] = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
_A : Tuple = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
_A : Optional[int] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" )
_A : Tuple = image_processing(images=_a , annotations=_a , masks_path=_a , return_tensors="""pt""" )
# verify pixel values
_A : str = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , _a )
_A : Dict = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , _a , atol=1e-4 ) )
# verify area
_A : List[Any] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , _a ) )
# verify boxes
_A : List[str] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , _a )
_A : Any = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , _a , atol=1e-3 ) )
# verify image_id
_A : Optional[Any] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , _a ) )
# verify is_crowd
_A : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , _a ) )
# verify class_labels
_A : Union[str, Any] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , _a ) )
# verify masks
_A : List[str] = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , _a )
# verify orig_size
_A : Union[str, Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , _a ) )
# verify size
_A : Dict = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , _a ) )
| 26 |
from __future__ import annotations
import numpy as np
def relu(vector):
    # Element-wise max(0, x); named to match the call in the __main__ block below.
    return np.maximum(0, vector)
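# Illustrative companion (not part of the original snippet): the sub-gradient of ReLU,
# often needed next to the activation itself for backpropagation. A minimal sketch using
# the same numpy conventions as relu() above; the name relu_derivative is ours.
def relu_derivative(vector):
    # 1.0 where the input is strictly positive, 0.0 elsewhere (sub-gradient at 0 taken as 0)
    return (np.asarray(vector) > 0).astype(float)
# Example: relu_derivative([-1, 0, 5]) --> [0. 0. 1.]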
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 26 | 1 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def lowerCAmelCase_ ( snake_case_ ):
_A : Dict = OrderedDict()
for key, value in state_dict.items():
if key.startswith("""module.encoder""" ):
_A : Tuple = key.replace("""module.encoder""","""glpn.encoder""" )
if key.startswith("""module.decoder""" ):
_A : str = key.replace("""module.decoder""","""decoder.stages""" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
_A : List[str] = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
_A : List[Any] = key.replace(f'''patch_embed{idx}''',f'''patch_embeddings.{int(snake_case_ )-1}''' )
if "norm" in key:
_A : Any = key.replace("""norm""","""layer_norm""" )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
_A : List[str] = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )]
_A : List[str] = key.replace(f'''layer_norm{idx}''',f'''layer_norm.{int(snake_case_ )-1}''' )
if "layer_norm1" in key:
_A : Union[str, Any] = key.replace("""layer_norm1""","""layer_norm_1""" )
if "layer_norm2" in key:
_A : Any = key.replace("""layer_norm2""","""layer_norm_2""" )
if "block" in key:
# replace for example block1 by block.0
_A : Optional[int] = key[key.find("""block""" ) + len("""block""" )]
_A : List[str] = key.replace(f'''block{idx}''',f'''block.{int(snake_case_ )-1}''' )
if "attn.q" in key:
_A : str = key.replace("""attn.q""","""attention.self.query""" )
if "attn.proj" in key:
_A : Union[str, Any] = key.replace("""attn.proj""","""attention.output.dense""" )
if "attn" in key:
_A : int = key.replace("""attn""","""attention.self""" )
if "fc1" in key:
_A : Optional[int] = key.replace("""fc1""","""dense1""" )
if "fc2" in key:
_A : Optional[Any] = key.replace("""fc2""","""dense2""" )
if "linear_pred" in key:
_A : Optional[int] = key.replace("""linear_pred""","""classifier""" )
if "linear_fuse" in key:
_A : List[Any] = key.replace("""linear_fuse.conv""","""linear_fuse""" )
_A : List[str] = key.replace("""linear_fuse.bn""","""batch_norm""" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
_A : Optional[Any] = key[key.find("""linear_c""" ) + len("""linear_c""" )]
_A : List[str] = key.replace(f'''linear_c{idx}''',f'''linear_c.{int(snake_case_ )-1}''' )
if "bot_conv" in key:
_A : List[Any] = key.replace("""bot_conv""","""0.convolution""" )
if "skip_conv1" in key:
_A : Union[str, Any] = key.replace("""skip_conv1""","""1.convolution""" )
if "skip_conv2" in key:
_A : Union[str, Any] = key.replace("""skip_conv2""","""2.convolution""" )
if "fusion1" in key:
_A : int = key.replace("""fusion1""","""1.fusion""" )
if "fusion2" in key:
_A : str = key.replace("""fusion2""","""2.fusion""" )
if "fusion3" in key:
_A : Optional[Any] = key.replace("""fusion3""","""3.fusion""" )
if "fusion" in key and "conv" in key:
_A : Any = key.replace("""conv""","""convolutional_layer""" )
if key.startswith("""module.last_layer_depth""" ):
_A : List[Any] = key.replace("""module.last_layer_depth""","""head.head""" )
_A : int = value
return new_state_dict
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
_A : Optional[Any] = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' )
_A : int = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' )
# next, add keys and values (in that order) to the state dict
_A : Union[str, Any] = kv_weight[
: config.hidden_sizes[i], :
]
_A : int = kv_bias[: config.hidden_sizes[i]]
_A : Dict = kv_weight[
config.hidden_sizes[i] :, :
]
_A : Optional[int] = kv_bias[config.hidden_sizes[i] :]
def lowerCAmelCase_ ( ):
_A : List[str] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_A : List[str] = Image.open(requests.get(snake_case_,stream=snake_case_ ).raw )
return image
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_=False,snake_case_=None ):
_A : str = GLPNConfig(hidden_sizes=[64, 128, 320, 512],decoder_hidden_size=64,depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
_A : Tuple = GLPNImageProcessor()
# prepare image
_A : List[Any] = prepare_img()
_A : str = image_processor(images=snake_case_,return_tensors="""pt""" ).pixel_values
logger.info("""Converting model...""" )
# load original state dict
_A : int = torch.load(snake_case_,map_location=torch.device("""cpu""" ) )
# rename keys
_A : Tuple = rename_keys(snake_case_ )
# key and value matrices need special treatment
read_in_k_v(snake_case_,snake_case_ )
# create HuggingFace model and load state dict
_A : int = GLPNForDepthEstimation(snake_case_ )
model.load_state_dict(snake_case_ )
model.eval()
# forward pass
_A : str = model(snake_case_ )
_A : Any = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
_A : List[str] = torch.tensor(
[[4.41_47, 4.08_73, 4.06_73], [3.78_90, 3.28_81, 3.15_25], [3.76_74, 3.54_23, 3.49_13]] )
elif "kitti" in model_name:
_A : List[str] = torch.tensor(
[[3.42_91, 2.78_65, 2.51_51], [3.28_41, 2.70_21, 2.35_02], [3.11_47, 2.46_25, 2.24_81]] )
else:
raise ValueError(f'''Unknown model name: {model_name}''' )
_A : Optional[int] = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3],snake_case_,atol=1e-4 )
print("""Looks ok!""" )
# finally, push to hub if required
if push_to_hub:
logger.info("""Pushing model and image processor to the hub...""" )
model.push_to_hub(
repo_path_or_name=Path(snake_case_,snake_case_ ),organization="""nielsr""",commit_message="""Add model""",use_temp_dir=snake_case_,)
image_processor.push_to_hub(
repo_path_or_name=Path(snake_case_,snake_case_ ),organization="""nielsr""",commit_message="""Add image processor""",use_temp_dir=snake_case_,)
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path",
default=None,
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
parser.add_argument(
"--model_name",
default="glpn-kitti",
type=str,
help="Name of the model in case you're pushing to the hub.",
)
_snake_case = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
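# Example invocation (illustrative; assumes this script is saved as convert_glpn_to_pytorch.py
# and that the checkpoint path is a placeholder):
#   python convert_glpn_to_pytorch.py \
#       --checkpoint_path /path/to/glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti \
#       --model_name glpn-kitti \
#       --push_to_hub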
| 26 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
_snake_case = getLogger(__name__)
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ = 8,snake_case_ = 1024,snake_case_="val",snake_case_=None,snake_case_=False,snake_case_="summarization",snake_case_=None,snake_case_=1,snake_case_ = None,snake_case_="",**snake_case_,):
_A : Dict = str(snake_case_ )
assert local_rank is not None
torch.distributed.init_process_group(backend="""nccl""",rank=snake_case_ )
_A : Tuple = Path(snake_case_ )
_A : List[Any] = save_dir.joinpath(f'''rank_{local_rank}_output.json''' )
torch.cuda.set_device(snake_case_ )
_A : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(snake_case_ ).cuda()
if fpaa:
_A : Any = model.half()
# determine if we need to increase num_beams
use_task_specific_params(snake_case_,snake_case_ ) # update config with task specific params
_A : str = generate_kwargs.pop("""num_beams""",model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
_A : int = num_return_sequences
_A : Optional[Any] = AutoTokenizer.from_pretrained(snake_case_ )
logger.info(f'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type.
if max_source_length is None:
_A : Optional[int] = tokenizer.model_max_length
if prefix is None:
_A : Tuple = prefix or getattr(model.config,"""prefix""","""""" ) or """"""
_A : Optional[int] = SeqaSeqDataset(
snake_case_,snake_case_,snake_case_,max_target_length=1024,type_path=snake_case_,n_obs=snake_case_,prefix=snake_case_,**snake_case_,)
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
_A : Optional[int] = ds.make_sortish_sampler(snake_case_,distributed=snake_case_,add_extra_examples=snake_case_,shuffle=snake_case_ )
_A : Dict = DataLoader(snake_case_,sampler=snake_case_,batch_size=snake_case_,collate_fn=ds.collate_fn )
_A : Optional[Any] = []
for batch in tqdm(snake_case_ ):
_A : Tuple = model.generate(
input_ids=batch["""input_ids"""].to(model.device ),attention_mask=batch["""attention_mask"""].to(model.device ),num_return_sequences=snake_case_,num_beams=snake_case_,**snake_case_,)
_A : Any = tokenizer.batch_decode(snake_case_,skip_special_tokens=snake_case_,clean_up_tokenization_spaces=snake_case_ )
_A : Dict = batch["""ids"""]
if num_return_sequences > 1:
_A : Any = chunks(snake_case_,snake_case_ ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(snake_case_ ):
results.append({"""pred""": pred, """id""": ids[i].item()} )
save_json(snake_case_,snake_case_ )
return results, sampler.num_replicas
def lowerCAmelCase_ ( ):
_A : Tuple = argparse.ArgumentParser(
epilog="""Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate""" )
parser.add_argument("""--data_dir""",type=snake_case_,help="""like cnn_dm/test.source""" )
parser.add_argument(
"""--model_name""",type=snake_case_,help="""like facebook/bart-large-cnn,t5-base, etc.""",default="""sshleifer/distilbart-xsum-12-3""",)
parser.add_argument("""--save_dir""",type=snake_case_,help="""where to save""",default="""tmp_gen""" )
parser.add_argument("""--max_source_length""",type=snake_case_,default=snake_case_ )
parser.add_argument(
"""--type_path""",type=snake_case_,default="""test""",help="""which subset to evaluate typically train/val/test""" )
parser.add_argument("""--task""",type=snake_case_,default="""summarization""",help="""used for task_specific_params + metrics""" )
parser.add_argument("""--bs""",type=snake_case_,default=8,required=snake_case_,help="""batch size""" )
parser.add_argument(
"""--local_rank""",type=snake_case_,default=-1,required=snake_case_,help="""should be passed by distributed.launch""" )
parser.add_argument(
"""--n_obs""",type=snake_case_,default=snake_case_,required=snake_case_,help="""How many observations. Defaults to all.""" )
parser.add_argument(
"""--num_return_sequences""",type=snake_case_,default=1,required=snake_case_,help="""How many sequences to return""" )
parser.add_argument(
"""--sync_timeout""",type=snake_case_,default=600,required=snake_case_,help="""How long should master process wait for other processes to finish.""",)
parser.add_argument("""--src_lang""",type=snake_case_,default=snake_case_,required=snake_case_ )
parser.add_argument("""--tgt_lang""",type=snake_case_,default=snake_case_,required=snake_case_ )
parser.add_argument(
"""--prefix""",type=snake_case_,required=snake_case_,default=snake_case_,help="""will be added to the begininng of src examples""" )
parser.add_argument("""--fp16""",action="""store_true""" )
parser.add_argument("""--debug""",action="""store_true""" )
_A : Union[str, Any] = time.time()
_A , _A : List[str] = parser.parse_known_args()
_A : List[str] = parse_numeric_n_bool_cl_kwargs(snake_case_ )
if generate_kwargs and args.local_rank <= 0:
print(f'''parsed the following generate kwargs: {generate_kwargs}''' )
_A : Dict = Path(args.save_dir + """_tmp""" )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) # this handles locking.
_A : int = list(json_save_dir.glob("""rank_*.json""" ) )
if intermediate_files:
raise ValueError(f'''Found files at {json_save_dir} please move or remove them.''' )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
_A : Any = {}
if args.src_lang is not None:
_A : int = args.src_lang
if args.tgt_lang is not None:
_A : Dict = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=snake_case_ )
_A , _A : str = eval_data_dir(
args.data_dir,snake_case_,args.model_name,type_path=args.type_path,bs=args.bs,fpaa=args.fpaa,task=args.task,local_rank=args.local_rank,n_obs=args.n_obs,max_source_length=args.max_source_length,num_return_sequences=args.num_return_sequences,prefix=args.prefix,dataset_kwargs=snake_case_,**snake_case_,)
if args.local_rank <= 0:
_A : List[Any] = Path(args.save_dir )
save_dir.mkdir(exist_ok=snake_case_ )
_A : Tuple = gather_results_from_each_node(snake_case_,snake_case_,args.sync_timeout )
_A : Optional[int] = combine_partial_results(snake_case_ )
if args.num_return_sequences > 1:
_A : Optional[Any] = save_dir.joinpath("""pseudolabel_results.json""" )
print(f'''Saving aggregated results at {save_path}, intermediate in {json_save_dir}/''' )
save_json(snake_case_,snake_case_ )
return
_A : List[str] = Path(args.data_dir ).joinpath(args.type_path + """.target""" )
with open(snake_case_ ) as f:
_A : int = [x.rstrip() for x in f.readlines()][: len(snake_case_ )]
# Calculate metrics, save metrics, and save _generations.txt
_A : Dict = """translation""" in args.task
_A : Optional[Any] = calculate_bleu if calc_bleu else calculate_rouge
_A : Tuple = """bleu""" if calc_bleu else """rouge"""
_A : Dict = score_fn(snake_case_,snake_case_ )
_A : List[Any] = len(snake_case_ )
_A : Optional[int] = time.time() - start_time
_A : Dict = round(runtime / metrics["""n_obs"""],4 )
_A : Dict = num_replicas
# TODO(@stas00): add whatever metadata to metrics
_A : Any = save_dir.joinpath(f'''{args.type_path}_{metric_name}.json''' )
save_json(snake_case_,snake_case_,indent=snake_case_ )
print(snake_case_ )
write_txt_file(snake_case_,save_dir.joinpath(f'''{args.type_path}_generations.txt''' ) )
if args.debug:
write_txt_file(snake_case_,save_dir.joinpath(f'''{args.type_path}.target''' ) )
else:
shutil.rmtree(snake_case_ )
def lowerCAmelCase_ ( snake_case_ ):
_A : Dict = []
for partial_result in partial_results:
records.extend(snake_case_ )
_A : Optional[Any] = sorted(snake_case_,key=lambda snake_case_ : x["id"] )
_A : List[str] = [x["""pred"""] for x in records]
return preds
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
# WAIT FOR lots of .json files
_A : Optional[Any] = time.time()
logger.info("""waiting for all nodes to finish""" )
_A : List[str] = None
while (time.time() - start_wait) < timeout:
_A : str = list(save_dir.glob("""rank_*.json""" ) )
if len(snake_case_ ) < num_replicas:
continue
try:
# make sure all json files are fully saved
_A : List[str] = lmap(snake_case_,snake_case_ )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError("""Rank 0 gave up on waiting for other processes""" )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
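# Example launch (illustrative; assumes this script is saved as run_distributed_eval.py):
#   python -m torch.distributed.launch --nproc_per_node=8 run_distributed_eval.py \
#       --model_name sshleifer/distilbart-xsum-12-3 \
#       --data_dir xsum \
#       --save_dir xsum_generations \
#       --bs 16 \
#       --fp16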
| 26 | 1 |
def excel_title_to_column(column_title: str) -> int:
    # Convert an Excel-style column title (e.g. "AB") to its 1-based column number.
    # The title is read right-to-left as a base-26 number where "A" == 1 ... "Z" == 26.
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
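# Worked examples (illustrative): "A" -> 1, "AB" -> 2 * 26**0 + 1 * 26**1 = 28,
# "ZZ" -> 26 * 26**0 + 26 * 26**1 = 702.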
if __name__ == "__main__":
from doctest import testmod
testmod()
| 26 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
@slow
def a__ ( self ) -> Any:
_A : Tuple = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
_A : List[Any] = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !"
_A : List[str] = model(_a )["""last_hidden_state"""]
_A : Union[str, Any] = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , _a )
# compare the actual values for a slice.
_A : List[Any] = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 26 | 1 |
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
)
_snake_case = None
_snake_case = {
"7B": 11008,
"13B": 13824,
"30B": 17920,
"65B": 22016,
"70B": 28672,
}
_snake_case = {
"7B": 1,
"7Bf": 1,
"13B": 2,
"13Bf": 2,
"30B": 4,
"65B": 8,
"70B": 8,
"70Bf": 8,
}
def lowerCAmelCase_ ( snake_case_,snake_case_=1,snake_case_=256 ):
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
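# Worked example (illustrative, using the 7B entry of the "7B": 11008 table near the top
# of this file): with n = dim = 4096, ffn_dim_multiplier = 1 and multiple_of = 256,
#   int(8 * 4096 / 3) = 10922  ->  rounded up to the next multiple of 256  ->  11008.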
def lowerCAmelCase_ ( snake_case_ ):
with open(snake_case_,"""r""" ) as f:
return json.load(snake_case_ )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
with open(snake_case_,"""w""" ) as f:
json.dump(snake_case_,snake_case_ )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_=True ):
os.makedirs(snake_case_,exist_ok=snake_case_ )
_A : Tuple = os.path.join(snake_case_,"""tmp""" )
os.makedirs(snake_case_,exist_ok=snake_case_ )
_A : int = read_json(os.path.join(snake_case_,"""params.json""" ) )
_A : Any = NUM_SHARDS[model_size]
_A : Dict = params["""n_layers"""]
_A : Optional[Any] = params["""n_heads"""]
_A : Union[str, Any] = n_heads // num_shards
_A : Dict = params["""dim"""]
_A : Optional[Any] = dim // n_heads
_A : List[Any] = 1_00_00.0
_A : List[str] = 1.0 / (base ** (torch.arange(0,snake_case_,2 ).float() / dims_per_head))
if "n_kv_heads" in params:
_A : str = params["""n_kv_heads"""] # for GQA / MQA
_A : List[Any] = n_heads_per_shard // num_key_value_heads
_A : List[str] = dim // num_key_value_heads
else: # compatibility with other checkpoints
_A : Any = n_heads
_A : Optional[int] = n_heads_per_shard
_A : List[Any] = dim
# permute for sliced rotary
def permute(snake_case_,snake_case_=n_heads,snake_case_=dim,snake_case_=dim ):
return w.view(snake_case_,dima // n_heads // 2,2,snake_case_ ).transpose(1,2 ).reshape(snake_case_,snake_case_ )
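    # Illustrative note: the raw checkpoints store the rotary (q/k) projection rows in an
    # interleaved real/imaginary layout; the view/transpose/reshape above reorders them into
    # the first-half / second-half layout that the Hugging Face Llama rotary embedding expects,
    # so attention scores stay identical after conversion.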
print(f'''Fetching all parameters from the checkpoint at {input_base_path}.''' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
_A : str = torch.load(os.path.join(snake_case_,"""consolidated.00.pth""" ),map_location="""cpu""" )
else:
# Sharded
_A : int = [
torch.load(os.path.join(snake_case_,f'''consolidated.{i:02d}.pth''' ),map_location="""cpu""" )
for i in range(snake_case_ )
]
_A : Optional[int] = 0
_A : Optional[Any] = {"""weight_map""": {}}
for layer_i in range(snake_case_ ):
_A : int = f'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
_A : Union[str, Any] = {
f'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute(
loaded[f'''layers.{layer_i}.attention.wq.weight'''] ),
f'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute(
loaded[f'''layers.{layer_i}.attention.wk.weight'''] ),
f'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[f'''layers.{layer_i}.attention.wv.weight'''],
f'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[f'''layers.{layer_i}.attention.wo.weight'''],
f'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w1.weight'''],
f'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w2.weight'''],
f'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w3.weight'''],
f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[f'''layers.{layer_i}.attention_norm.weight'''],
f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[f'''layers.{layer_i}.ffn_norm.weight'''],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
_A : Any = {
f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][
f'''layers.{layer_i}.attention_norm.weight'''
].clone(),
f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][
f'''layers.{layer_i}.ffn_norm.weight'''
].clone(),
}
_A : Union[str, Any] = permute(
torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wq.weight'''].view(snake_case_,snake_case_,snake_case_ )
for i in range(snake_case_ )
],dim=0,).reshape(snake_case_,snake_case_ ) )
_A : Optional[Any] = permute(
torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wk.weight'''].view(
snake_case_,snake_case_,snake_case_ )
for i in range(snake_case_ )
],dim=0,).reshape(snake_case_,snake_case_ ),snake_case_,snake_case_,snake_case_,)
_A : Union[str, Any] = torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wv.weight'''].view(
snake_case_,snake_case_,snake_case_ )
for i in range(snake_case_ )
],dim=0,).reshape(snake_case_,snake_case_ )
_A : str = torch.cat(
[loaded[i][f'''layers.{layer_i}.attention.wo.weight'''] for i in range(snake_case_ )],dim=1 )
_A : Tuple = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(snake_case_ )],dim=0 )
_A : Dict = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(snake_case_ )],dim=1 )
_A : Any = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(snake_case_ )],dim=0 )
_A : Any = inv_freq
for k, v in state_dict.items():
_A : Dict = filename
param_count += v.numel()
torch.save(snake_case_,os.path.join(snake_case_,snake_case_ ) )
_A : Optional[Any] = f'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
_A : Optional[Any] = {
"""model.embed_tokens.weight""": loaded["""tok_embeddings.weight"""],
"""model.norm.weight""": loaded["""norm.weight"""],
"""lm_head.weight""": loaded["""output.weight"""],
}
else:
_A : Optional[int] = {
"""model.norm.weight""": loaded[0]["""norm.weight"""],
"""model.embed_tokens.weight""": torch.cat(
[loaded[i]["""tok_embeddings.weight"""] for i in range(snake_case_ )],dim=1 ),
"""lm_head.weight""": torch.cat([loaded[i]["""output.weight"""] for i in range(snake_case_ )],dim=0 ),
}
for k, v in state_dict.items():
_A : List[str] = filename
param_count += v.numel()
torch.save(snake_case_,os.path.join(snake_case_,snake_case_ ) )
# Write configs
_A : Tuple = {"""total_size""": param_count * 2}
write_json(snake_case_,os.path.join(snake_case_,"""pytorch_model.bin.index.json""" ) )
_A : Any = params["""ffn_dim_multiplier"""] if """ffn_dim_multiplier""" in params else 1
_A : int = params["""multiple_of"""] if """multiple_of""" in params else 256
_A : str = LlamaConfig(
hidden_size=snake_case_,intermediate_size=compute_intermediate_size(snake_case_,snake_case_,snake_case_ ),num_attention_heads=params["""n_heads"""],num_hidden_layers=params["""n_layers"""],rms_norm_eps=params["""norm_eps"""],num_key_value_heads=snake_case_,)
config.save_pretrained(snake_case_ )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("""Loading the checkpoint in a Llama model.""" )
_A : Optional[Any] = LlamaForCausalLM.from_pretrained(snake_case_,torch_dtype=torch.floataa,low_cpu_mem_usage=snake_case_ )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("""Saving in the Transformers format.""" )
model.save_pretrained(snake_case_,safe_serialization=snake_case_ )
shutil.rmtree(snake_case_ )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
# Initialize the tokenizer based on the `spm` model
_A : Union[str, Any] = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(f'''Saving a {tokenizer_class.__name__} to {tokenizer_path}.''' )
_A : List[Any] = tokenizer_class(snake_case_ )
tokenizer.save_pretrained(snake_case_ )
def lowerCAmelCase_ ( ):
_A : Dict = argparse.ArgumentParser()
parser.add_argument(
"""--input_dir""",help="""Location of LLaMA weights, which contains tokenizer.model and model folders""",)
parser.add_argument(
"""--model_size""",choices=["""7B""", """7Bf""", """13B""", """13Bf""", """30B""", """65B""", """70B""", """70Bf""", """tokenizer_only"""],)
parser.add_argument(
"""--output_dir""",help="""Location to write HF model and tokenizer""",)
parser.add_argument("""--safe_serialization""",type=snake_case_,help="""Whether or not to save using `safetensors`.""" )
_A : Union[str, Any] = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir,input_base_path=os.path.join(args.input_dir,args.model_size ),model_size=args.model_size,safe_serialization=args.safe_serialization,)
_A : int = os.path.join(args.input_dir,"""tokenizer.model""" )
write_tokenizer(args.output_dir,snake_case_ )
if __name__ == "__main__":
main()
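# Example invocation (illustrative; assumes this script is saved as
# convert_llama_weights_to_hf.py and that the paths are placeholders):
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama/weights \
#       --model_size 7B \
#       --output_dir /output/path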
| 26 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
_snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
_snake_case = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class lowercase ( UpperCamelCase__ ):
_a = 42
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a , _a , _a , _a , _a , ) -> List[Any]:
super().__init__()
self.register_modules(
prior=_a , image_encoder=_a , image_processor=_a , scheduler=_a , renderer=_a , )
def a__ ( self , _a , _a , _a , _a , _a , _a ) -> str:
if latents is None:
_A : str = randn_tensor(_a , generator=_a , device=_a , dtype=_a )
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
_A : Union[str, Any] = latents.to(_a )
_A : int = latents * scheduler.init_noise_sigma
return latents
def a__ ( self , _a=0 ) -> Optional[Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_A : str = torch.device(F'''cuda:{gpu_id}''' )
_A : Any = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_a , _a )
@property
def a__ ( self ) -> List[Any]:
if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(_a , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def a__ ( self , _a , _a , _a , _a , ) -> Tuple:
if isinstance(_a , _a ) and isinstance(image[0] , torch.Tensor ):
_A : int = torch.cat(_a , axis=0 ) if image[0].ndim == 4 else torch.stack(_a , axis=0 )
if not isinstance(_a , torch.Tensor ):
_A : Dict = self.image_processor(_a , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 )
_A : int = image.to(dtype=self.image_encoder.dtype , device=_a )
_A : List[Any] = self.image_encoder(_a )["""last_hidden_state"""]
_A : List[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_A : Dict = image_embeds.repeat_interleave(_a , dim=0 )
if do_classifier_free_guidance:
_A : str = torch.zeros_like(_a )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_A : List[str] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(_a )
def __call__( self , _a , _a = 1 , _a = 25 , _a = None , _a = None , _a = 4.0 , _a = 64 , _a = "pil" , _a = True , ) -> Union[str, Any]:
if isinstance(_a , PIL.Image.Image ):
_A : List[Any] = 1
elif isinstance(_a , torch.Tensor ):
_A : Any = image.shape[0]
elif isinstance(_a , _a ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
_A : Union[str, Any] = len(_a )
else:
raise ValueError(
F'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_a )}''' )
_A : Optional[int] = self._execution_device
_A : Tuple = batch_size * num_images_per_prompt
_A : List[Any] = guidance_scale > 1.0
_A : Optional[Any] = self._encode_image(_a , _a , _a , _a )
# prior
self.scheduler.set_timesteps(_a , device=_a )
_A : Optional[int] = self.scheduler.timesteps
_A : List[str] = self.prior.config.num_embeddings
_A : int = self.prior.config.embedding_dim
_A : Optional[Any] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _a , _a , _a , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
_A : List[Any] = latents.reshape(latents.shape[0] , _a , _a )
for i, t in enumerate(self.progress_bar(_a ) ):
# expand the latents if we are doing classifier free guidance
_A : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_A : int = self.scheduler.scale_model_input(_a , _a )
_A : Tuple = self.prior(
_a , timestep=_a , proj_embedding=_a , ).predicted_image_embedding
# remove the variance
_A , _A : Optional[Any] = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance is not None:
_A , _A : Dict = noise_pred.chunk(2 )
_A : Tuple = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
_A : int = self.scheduler.step(
_a , timestep=_a , sample=_a , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=_a )
_A : List[str] = []
for i, latent in enumerate(_a ):
print()
_A : List[str] = self.renderer.decode(
latent[None, :] , _a , size=_a , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(_a )
_A : List[Any] = torch.stack(_a )
if output_type not in ["np", "pil"]:
raise ValueError(F'''Only the output types `pil` and `np` are supported not output_type={output_type}''' )
_A : List[str] = images.cpu().numpy()
if output_type == "pil":
_A : List[Any] = [self.numpy_to_pil(_a ) for image in images]
# Offload last model to CPU
if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=_a )
| 26 | 1 |
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    # Probability of exactly `successes` successes in `trials` independent Bernoulli
    # trials with success probability `prob`; named to match the __main__ block below.
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / (k! * (n - k)!)
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
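# Worked example (illustrative): binomial_distribution(2, 4, 0.75)
#   = C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375,
# which is the value printed by the __main__ block below.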
if __name__ == "__main__":
from doctest import testmod
testmod()
print("Probability of 2 successes out of 4 trails")
print("with probability of 0.75 is:", end=" ")
print(binomial_distribution(2, 4, 0.7_5))
| 26 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def lowerCAmelCase_ ( snake_case_ ):
print("""Loading config file...""" )
def flatten_yaml_as_dict(snake_case_,snake_case_="",snake_case_="." ):
_A : Union[str, Any] = []
for k, v in d.items():
_A : Optional[int] = parent_key + sep + k if parent_key else k
if isinstance(snake_case_,collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(snake_case_,snake_case_,sep=snake_case_ ).items() )
else:
items.append((new_key, v) )
return dict(snake_case_ )
_A : List[Any] = argparse.Namespace()
with open(snake_case_,"""r""" ) as yaml_file:
try:
_A : List[Any] = yaml.load(snake_case_,Loader=yaml.FullLoader )
_A : Optional[int] = flatten_yaml_as_dict(snake_case_ )
for k, v in flat_cfg.items():
setattr(snake_case_,snake_case_,snake_case_ )
except yaml.YAMLError as exc:
logger.error("""Error while loading config file: {}. Error message: {}""".format(snake_case_,str(snake_case_ ) ) )
return config
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Optional[Any] = MobileViTVaConfig()
_A : Tuple = False
# dataset
if task_name.startswith("""imagenet1k_""" ):
_A : Dict = 1000
if int(task_name.strip().split("""_""" )[-1] ) == 384:
_A : int = 384
else:
_A : int = 256
_A : List[str] = """imagenet-1k-id2label.json"""
elif task_name.startswith("""imagenet21k_to_1k_""" ):
_A : Union[str, Any] = 21000
if int(task_name.strip().split("""_""" )[-1] ) == 384:
_A : str = 384
else:
_A : List[Any] = 256
_A : List[str] = """imagenet-22k-id2label.json"""
elif task_name.startswith("""ade20k_""" ):
_A : int = 151
_A : int = 512
_A : Optional[int] = """ade20k-id2label.json"""
_A : Any = True
elif task_name.startswith("""voc_""" ):
_A : List[Any] = 21
_A : Dict = 512
_A : Dict = """pascal-voc-id2label.json"""
_A : int = True
# orig_config
_A : Any = load_orig_config_file(snake_case_ )
assert getattr(snake_case_,"""model.classification.name""",-1 ) == "mobilevit_v2", "Invalid model"
_A : List[Any] = getattr(snake_case_,"""model.classification.mitv2.width_multiplier""",1.0 )
assert (
getattr(snake_case_,"""model.classification.mitv2.attn_norm_layer""",-1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
_A : str = getattr(snake_case_,"""model.classification.activation.name""","""swish""" )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
_A : Optional[int] = getattr(snake_case_,"""model.segmentation.output_stride""",16 )
if "_deeplabv3" in task_name:
_A : int = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_rates""",[12, 24, 36] )
_A : int = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_out_channels""",512 )
_A : str = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_dropout""",0.1 )
# id2label
_A : List[Any] = """huggingface/label-files"""
_A : List[Any] = json.load(open(hf_hub_download(snake_case_,snake_case_,repo_type="""dataset""" ),"""r""" ) )
_A : str = {int(snake_case_ ): v for k, v in idalabel.items()}
_A : str = idalabel
_A : Dict = {v: k for k, v in idalabel.items()}
return config
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Any = dct.pop(snake_case_ )
_A : Union[str, Any] = val
def lowerCAmelCase_ ( snake_case_,snake_case_=False ):
if base_model:
_A : Optional[int] = """"""
else:
_A : Dict = """mobilevitv2."""
_A : int = []
for k in state_dict.keys():
if k[:8] == "encoder.":
_A : Any = k[8:]
else:
_A : List[str] = k
if ".block." in k:
_A : Any = k_new.replace(""".block.""",""".""" )
if ".conv." in k:
_A : List[Any] = k_new.replace(""".conv.""",""".convolution.""" )
if ".norm." in k:
_A : Any = k_new.replace(""".norm.""",""".normalization.""" )
if "conv_1." in k:
_A : int = k_new.replace("""conv_1.""",f'''{model_prefix}conv_stem.''' )
for i in [1, 2]:
if f'''layer_{i}.''' in k:
_A : Optional[Any] = k_new.replace(f'''layer_{i}.''',f'''{model_prefix}encoder.layer.{i-1}.layer.''' )
if ".exp_1x1." in k:
_A : Tuple = k_new.replace(""".exp_1x1.""",""".expand_1x1.""" )
if ".red_1x1." in k:
_A : Optional[int] = k_new.replace(""".red_1x1.""",""".reduce_1x1.""" )
for i in [3, 4, 5]:
if f'''layer_{i}.0.''' in k:
_A : Optional[int] = k_new.replace(f'''layer_{i}.0.''',f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
if f'''layer_{i}.1.local_rep.0.''' in k:
_A : Union[str, Any] = k_new.replace(f'''layer_{i}.1.local_rep.0.''',f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
if f'''layer_{i}.1.local_rep.1.''' in k:
_A : str = k_new.replace(f'''layer_{i}.1.local_rep.1.''',f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
for i in [3, 4, 5]:
if i == 3:
_A : Optional[int] = [0, 1]
elif i == 4:
_A : Union[str, Any] = [0, 1, 2, 3]
elif i == 5:
_A : Optional[Any] = [0, 1, 2]
for j in j_in:
if f'''layer_{i}.1.global_rep.{j}.''' in k:
_A : Union[str, Any] = k_new.replace(
f'''layer_{i}.1.global_rep.{j}.''',f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
_A : List[str] = k_new.replace(
f'''layer_{i}.1.global_rep.{j+1}.''',f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
if f'''layer_{i}.1.conv_proj.''' in k:
_A : Optional[Any] = k_new.replace(f'''layer_{i}.1.conv_proj.''',f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
if "pre_norm_attn.0." in k:
_A : Optional[Any] = k_new.replace("""pre_norm_attn.0.""","""layernorm_before.""" )
if "pre_norm_attn.1." in k:
_A : str = k_new.replace("""pre_norm_attn.1.""","""attention.""" )
if "pre_norm_ffn.0." in k:
_A : Optional[Any] = k_new.replace("""pre_norm_ffn.0.""","""layernorm_after.""" )
if "pre_norm_ffn.1." in k:
_A : Dict = k_new.replace("""pre_norm_ffn.1.""","""ffn.conv1.""" )
if "pre_norm_ffn.3." in k:
_A : List[str] = k_new.replace("""pre_norm_ffn.3.""","""ffn.conv2.""" )
if "classifier.1." in k:
_A : List[str] = k_new.replace("""classifier.1.""","""classifier.""" )
if "seg_head." in k:
_A : List[Any] = k_new.replace("""seg_head.""","""segmentation_head.""" )
if ".aspp_layer." in k:
_A : List[Any] = k_new.replace(""".aspp_layer.""",""".""" )
if ".aspp_pool." in k:
_A : Optional[Any] = k_new.replace(""".aspp_pool.""",""".""" )
rename_keys.append((k, k_new) )
return rename_keys
def lowerCAmelCase_ ( snake_case_ ):
_A : Tuple = []
for k in state_dict.keys():
if k.startswith("""seg_head.aux_head.""" ):
keys_to_ignore.append(snake_case_ )
for k in keys_to_ignore:
state_dict.pop(snake_case_,snake_case_ )
def lowerCAmelCase_ ( ):
_A : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg"""
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
_A : List[Any] = Image.open(requests.get(snake_case_,stream=snake_case_ ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ):
_A : List[Any] = get_mobilevitva_config(snake_case_,snake_case_ )
# load original state_dict
_A : Tuple = torch.load(snake_case_,map_location="""cpu""" )
# load huggingface model
if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
_A : Optional[Any] = MobileViTVaForSemanticSegmentation(snake_case_ ).eval()
_A : str = False
else:
_A : int = MobileViTVaForImageClassification(snake_case_ ).eval()
_A : List[Any] = False
# remove and rename some keys of load the original model
_A : List[Any] = checkpoint
remove_unused_keys(snake_case_ )
_A : Optional[Any] = create_rename_keys(snake_case_,base_model=snake_case_ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(snake_case_,snake_case_,snake_case_ )
# load modified state_dict
model.load_state_dict(snake_case_ )
# Check outputs on an image, prepared by MobileViTImageProcessor
_A : str = MobileViTImageProcessor(crop_size=config.image_size,size=config.image_size + 32 )
_A : List[Any] = image_processor(images=prepare_img(),return_tensors="""pt""" )
_A : Optional[Any] = model(**snake_case_ )
# verify classification model
if task_name.startswith("""imagenet""" ):
_A : List[Any] = outputs.logits
_A : Optional[int] = logits.argmax(-1 ).item()
print("""Predicted class:""",model.config.idalabel[predicted_class_idx] )
if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
# expected_logits for base variant
_A : int = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] )
assert torch.allclose(logits[0, :3],snake_case_,atol=1e-4 )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case_ )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(snake_case_ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
_snake_case = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
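# Example invocation (illustrative; assumes this script is saved as
# convert_mobilevitv2_to_pytorch.py and that the paths are placeholders):
#   python convert_mobilevitv2_to_pytorch.py \
#       --task imagenet1k_256 \
#       --orig_checkpoint_path /path/to/mobilevitv2-1.0.pt \
#       --orig_config_path /path/to/mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-imagenet1k-256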
| 26 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
_snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
_snake_case = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class lowercase ( UpperCamelCase__ ):
_a = 42
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a , _a , _a , _a , _a , ) -> List[Any]:
super().__init__()
self.register_modules(
prior=_a , image_encoder=_a , image_processor=_a , scheduler=_a , renderer=_a , )
def a__ ( self , _a , _a , _a , _a , _a , _a ) -> str:
if latents is None:
_A : str = randn_tensor(_a , generator=_a , device=_a , dtype=_a )
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
_A : Union[str, Any] = latents.to(_a )
_A : int = latents * scheduler.init_noise_sigma
return latents
def a__ ( self , _a=0 ) -> Optional[Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_A : str = torch.device(F'''cuda:{gpu_id}''' )
_A : Any = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_a , _a )
@property
def a__ ( self ) -> List[Any]:
if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(_a , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def a__ ( self , _a , _a , _a , _a , ) -> Tuple:
if isinstance(_a , _a ) and isinstance(image[0] , torch.Tensor ):
_A : int = torch.cat(_a , axis=0 ) if image[0].ndim == 4 else torch.stack(_a , axis=0 )
if not isinstance(_a , torch.Tensor ):
_A : Dict = self.image_processor(_a , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 )
_A : int = image.to(dtype=self.image_encoder.dtype , device=_a )
_A : List[Any] = self.image_encoder(_a )["""last_hidden_state"""]
_A : List[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_A : Dict = image_embeds.repeat_interleave(_a , dim=0 )
if do_classifier_free_guidance:
_A : str = torch.zeros_like(_a )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_A : List[str] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(_a )
def __call__( self , _a , _a = 1 , _a = 25 , _a = None , _a = None , _a = 4.0 , _a = 64 , _a = "pil" , _a = True , ) -> Union[str, Any]:
if isinstance(_a , PIL.Image.Image ):
_A : List[Any] = 1
elif isinstance(_a , torch.Tensor ):
_A : Any = image.shape[0]
elif isinstance(_a , _a ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
_A : Union[str, Any] = len(_a )
else:
raise ValueError(
F'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_a )}''' )
_A : Optional[int] = self._execution_device
_A : Tuple = batch_size * num_images_per_prompt
_A : List[Any] = guidance_scale > 1.0
_A : Optional[Any] = self._encode_image(_a , _a , _a , _a )
# prior
self.scheduler.set_timesteps(_a , device=_a )
_A : Optional[int] = self.scheduler.timesteps
_A : List[str] = self.prior.config.num_embeddings
_A : int = self.prior.config.embedding_dim
_A : Optional[Any] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _a , _a , _a , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
_A : List[Any] = latents.reshape(latents.shape[0] , _a , _a )
for i, t in enumerate(self.progress_bar(_a ) ):
# expand the latents if we are doing classifier free guidance
_A : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_A : int = self.scheduler.scale_model_input(_a , _a )
_A : Tuple = self.prior(
_a , timestep=_a , proj_embedding=_a , ).predicted_image_embedding
# remove the variance
_A , _A : Optional[Any] = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance is not None:
_A , _A : Dict = noise_pred.chunk(2 )
_A : Tuple = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
_A : int = self.scheduler.step(
_a , timestep=_a , sample=_a , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=_a )
_A : List[str] = []
for i, latent in enumerate(_a ):
print()
_A : List[str] = self.renderer.decode(
latent[None, :] , _a , size=_a , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(_a )
_A : List[Any] = torch.stack(_a )
if output_type not in ["np", "pil"]:
raise ValueError(F'''Only the output types `pil` and `np` are supported not output_type={output_type}''' )
_A : List[str] = images.cpu().numpy()
if output_type == "pil":
_A : List[Any] = [self.numpy_to_pil(_a ) for image in images]
# Offload last model to CPU
if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=_a )
| 26 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowercase ( UpperCamelCase__ ):
_a = (DPMSolverSDEScheduler,)
_a = 1_0
def a__ ( self , **_a ) -> Optional[Any]:
_A : str = {
"""num_train_timesteps""": 1100,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""noise_sampler_seed""": 0,
}
config.update(**_a )
return config
def a__ ( self ) -> Tuple:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_a )
def a__ ( self ) -> Optional[int]:
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_a , beta_end=_a )
def a__ ( self ) -> Any:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_a )
def a__ ( self ) -> Optional[int]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_a )
def a__ ( self ) -> Optional[int]:
_A : Any = self.scheduler_classes[0]
_A : List[str] = self.get_scheduler_config()
_A : Optional[Any] = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
_A : Dict = self.dummy_model()
_A : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
_A : Dict = sample.to(_a )
for i, t in enumerate(scheduler.timesteps ):
_A : Optional[int] = scheduler.scale_model_input(_a , _a )
_A : str = model(_a , _a )
_A : List[Any] = scheduler.step(_a , _a , _a )
_A : Optional[int] = output.prev_sample
_A : Dict = torch.sum(torch.abs(_a ) )
_A : Dict = torch.mean(torch.abs(_a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47821044921875 ) < 1e-2
assert abs(result_mean.item() - 0.2178705964565277 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59352111816406 ) < 1e-2
assert abs(result_mean.item() - 0.22342906892299652 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3
def a__ ( self ) -> Optional[Any]:
_A : Dict = self.scheduler_classes[0]
_A : Optional[int] = self.get_scheduler_config(prediction_type="""v_prediction""" )
_A : Optional[Any] = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
_A : Tuple = self.dummy_model()
_A : int = self.dummy_sample_deter * scheduler.init_noise_sigma
_A : Tuple = sample.to(_a )
for i, t in enumerate(scheduler.timesteps ):
_A : int = scheduler.scale_model_input(_a , _a )
_A : Tuple = model(_a , _a )
_A : Dict = scheduler.step(_a , _a , _a )
_A : Optional[int] = output.prev_sample
_A : Optional[Any] = torch.sum(torch.abs(_a ) )
_A : List[Any] = torch.mean(torch.abs(_a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77149200439453 ) < 1e-2
assert abs(result_mean.item() - 0.16226289014816284 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1663360595703 ) < 1e-2
assert abs(result_mean.item() - 0.16688326001167297 ) < 1e-3
else:
assert abs(result_sum.item() - 119.8487548828125 ) < 1e-2
assert abs(result_mean.item() - 0.1560530662536621 ) < 1e-3
def a__ ( self ) -> List[str]:
_A : Union[str, Any] = self.scheduler_classes[0]
_A : List[Any] = self.get_scheduler_config()
_A : List[str] = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps , device=_a )
_A : Union[str, Any] = self.dummy_model()
_A : Optional[Any] = self.dummy_sample_deter.to(_a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_A : int = scheduler.scale_model_input(_a , _a )
_A : List[Any] = model(_a , _a )
_A : Dict = scheduler.step(_a , _a , _a )
_A : Dict = output.prev_sample
_A : str = torch.sum(torch.abs(_a ) )
_A : str = torch.mean(torch.abs(_a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46957397460938 ) < 1e-2
assert abs(result_mean.item() - 0.21805934607982635 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59353637695312 ) < 1e-2
assert abs(result_mean.item() - 0.22342908382415771 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3
def a__ ( self ) -> Union[str, Any]:
_A : List[Any] = self.scheduler_classes[0]
_A : Optional[Any] = self.get_scheduler_config()
_A : int = scheduler_class(**_a , use_karras_sigmas=_a )
scheduler.set_timesteps(self.num_inference_steps , device=_a )
_A : Optional[Any] = self.dummy_model()
_A : Dict = self.dummy_sample_deter.to(_a ) * scheduler.init_noise_sigma
_A : str = sample.to(_a )
for t in scheduler.timesteps:
_A : Optional[int] = scheduler.scale_model_input(_a , _a )
_A : List[Any] = model(_a , _a )
_A : Dict = scheduler.step(_a , _a , _a )
_A : List[str] = output.prev_sample
_A : str = torch.sum(torch.abs(_a ) )
_A : List[str] = torch.mean(torch.abs(_a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66974135742188 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63653564453125 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
else:
assert abs(result_sum.item() - 170.3135223388672 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
| 26 | 1 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[int] = tmp_path / """file.csv"""
_A : int = textwrap.dedent(
"""\
header1,header2
1,2
10,20
""" )
with open(snake_case_,"""w""" ) as f:
f.write(snake_case_ )
return str(snake_case_ )
@pytest.fixture
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[int] = tmp_path / """malformed_file.csv"""
_A : str = textwrap.dedent(
"""\
header1,header2
1,2
10,20,
""" )
with open(snake_case_,"""w""" ) as f:
f.write(snake_case_ )
return str(snake_case_ )
@pytest.fixture
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Optional[int] = tmp_path / """csv_with_image.csv"""
_A : str = textwrap.dedent(
f'''\
image
{image_file}
''' )
with open(snake_case_,"""w""" ) as f:
f.write(snake_case_ )
return str(snake_case_ )
@pytest.fixture
def lowerCAmelCase_ ( snake_case_ ):
_A : List[str] = tmp_path / """csv_with_label.csv"""
_A : Optional[int] = textwrap.dedent(
"""\
label
good
bad
good
""" )
with open(snake_case_,"""w""" ) as f:
f.write(snake_case_ )
return str(snake_case_ )
@pytest.fixture
def lowerCAmelCase_ ( snake_case_ ):
_A : List[Any] = tmp_path / """csv_with_int_list.csv"""
_A : Tuple = textwrap.dedent(
"""\
int_list
1 2 3
4 5 6
7 8 9
""" )
with open(snake_case_,"""w""" ) as f:
f.write(snake_case_ )
return str(snake_case_ )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Dict = Csv()
_A : List[Any] = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(snake_case_,match="""Error tokenizing data""" ):
for _ in generator:
pass
assert any(
record.levelname == """ERROR"""
and """Failed to read file""" in record.message
and os.path.basename(snake_case_ ) in record.message
for record in caplog.records )
@require_pil
def lowerCAmelCase_ ( snake_case_ ):
with open(snake_case_,encoding="""utf-8""" ) as f:
_A : str = f.read().splitlines()[1]
_A : Union[str, Any] = Csv(encoding="""utf-8""",features=Features({"""image""": Image()} ) )
_A : List[Any] = csv._generate_tables([[csv_file_with_image]] )
_A : Optional[int] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("""image""" ).type == Image()()
_A : Dict = pa_table.to_pydict()["""image"""]
assert generated_content == [{"path": image_file, "bytes": None}]
def lowerCAmelCase_ ( snake_case_ ):
with open(snake_case_,encoding="""utf-8""" ) as f:
_A : List[str] = f.read().splitlines()[1:]
_A : Optional[Any] = Csv(encoding="""utf-8""",features=Features({"""label""": ClassLabel(names=["""good""", """bad"""] )} ) )
_A : Tuple = csv._generate_tables([[csv_file_with_label]] )
_A : Union[str, Any] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("""label""" ).type == ClassLabel(names=["""good""", """bad"""] )()
_A : Tuple = pa_table.to_pydict()["""label"""]
    assert generated_content == [ClassLabel(names=["""good""", """bad"""] ).str2int(label ) for label in labels]
def lowerCAmelCase_ ( snake_case_ ):
    _A : List[str] = Csv(encoding="""utf-8""",sep=""",""",converters={"""int_list""": lambda x: [int(i) for i in x.split()]} )
_A : List[Any] = csv._generate_tables([[csv_file_with_int_list]] )
_A : Any = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field("""int_list""" ).type )
_A : str = pa_table.to_pydict()["""int_list"""]
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 26 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = 1
@register_to_config
def __init__( self , _a=2000 , _a=0.1 , _a=20 , _a=1e-3 ) -> List[Any]:
_A : Dict = None
_A : List[Any] = None
_A : Dict = None
def a__ ( self , _a , _a = None ) -> Union[str, Any]:
_A : Union[str, Any] = torch.linspace(1 , self.config.sampling_eps , _a , device=_a )
def a__ ( self , _a , _a , _a , _a=None ) -> Dict:
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
_A : Any = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
_A : List[Any] = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
_A : List[str] = std.flatten()
while len(std.shape ) < len(score.shape ):
_A : List[Any] = std.unsqueeze(-1 )
_A : int = -score / std
        # compute the drift and diffusion of the reverse-time SDE and take an Euler-Maruyama step
_A : Tuple = -1.0 / len(self.timesteps )
_A : str = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
_A : List[str] = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
_A : Union[str, Any] = beta_t.unsqueeze(-1 )
_A : Tuple = -0.5 * beta_t * x
_A : Tuple = torch.sqrt(_a )
_A : Dict = drift - diffusion**2 * score
_A : Dict = x + drift * dt
# add noise
_A : Any = randn_tensor(x.shape , layout=x.layout , generator=_a , device=x.device , dtype=x.dtype )
_A : str = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self ) -> Optional[Any]:
return self.config.num_train_timesteps
| 26 | 1 |
import unittest
import numpy as np
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ = None,):
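    # Schur complement of the block matrix [[A, B], [B^T, C]]: returns C - B^T A^{-1} B,
    # using the optional precomputed pseudo-inverse of A when it is provided.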
_A : Union[str, Any] = np.shape(snake_case_ )
_A : Optional[Any] = np.shape(snake_case_ )
_A : Any = np.shape(snake_case_ )
if shape_a[0] != shape_b[0]:
_A : List[Any] = (
"""Expected the same number of rows for A and B. """
f'''Instead found A of size {shape_a} and B of size {shape_b}'''
)
raise ValueError(snake_case_ )
if shape_b[1] != shape_c[1]:
_A : List[Any] = (
"""Expected the same number of columns for B and C. """
f'''Instead found B of size {shape_b} and C of size {shape_c}'''
)
raise ValueError(snake_case_ )
_A : Dict = pseudo_inv
if a_inv is None:
try:
_A : int = np.linalg.inv(snake_case_ )
except np.linalg.LinAlgError:
raise ValueError(
"""Input matrix A is not invertible. Cannot compute Schur complement.""" )
return mat_c - mat_b.T @ a_inv @ mat_b
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> None:
_A : int = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_A : Union[str, Any] = np.array([[0, 3], [3, 0], [2, 3]] )
_A : int = np.array([[2, 1], [6, 3]] )
_A : Tuple = schur_complement(_a , _a , _a )
_A : Union[str, Any] = np.block([[a, b], [b.T, c]] )
_A : Any = np.linalg.det(_a )
_A : Dict = np.linalg.det(_a )
_A : Tuple = np.linalg.det(_a )
self.assertAlmostEqual(_a , det_a * det_s )
def a__ ( self ) -> None:
_A : Optional[int] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_A : Dict = np.array([[0, 3], [3, 0], [2, 3]] )
_A : Union[str, Any] = np.array([[2, 1], [6, 3]] )
with self.assertRaises(_a ):
schur_complement(_a , _a , _a )
def a__ ( self ) -> None:
_A : List[str] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_A : Any = np.array([[0, 3], [3, 0], [2, 3]] )
_A : Optional[int] = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(_a ):
schur_complement(_a , _a , _a )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 26 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
},
"tokenizer_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
},
}
_snake_case = {
"google/fnet-base": 512,
"google/fnet-large": 512,
}
_snake_case = "▁"
class lowercase ( UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = ["input_ids", "token_type_ids"]
_a = FNetTokenizer
def __init__( self , _a=None , _a=None , _a=False , _a=True , _a=True , _a="<unk>" , _a="[SEP]" , _a="<pad>" , _a="[CLS]" , _a="[MASK]" , **_a , ) -> Optional[int]:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
_A : int = (
AddedToken(_a , lstrip=_a , rstrip=_a , normalized=_a )
if isinstance(_a , _a )
else mask_token
)
super().__init__(
_a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , **_a , )
_A : Optional[int] = do_lower_case
_A : List[Any] = remove_space
_A : str = keep_accents
_A : int = vocab_file
_A : int = False if not self.vocab_file else True
def a__ ( self , _a , _a = None ) -> List[int]:
_A : str = [self.sep_token_id]
_A : Dict = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def a__ ( self , _a , _a = None ) -> List[int]:
_A : Any = [self.sep_token_id]
_A : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a__ ( self , _a , _a = None ) -> Tuple[str]:
if not os.path.isdir(_a ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A : List[str] = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
return (out_vocab_file,)
| 26 | 1 |
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir("fixtures/test_sentencepiece.model")
_snake_case = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
_snake_case = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = CamembertTokenizer
_a = CamembertTokenizerFast
_a = True
_a = True
def a__ ( self ) -> Tuple:
super().setUp()
# We have a SentencePiece fixture for testing
_A : Optional[Any] = CamembertTokenizer(_a )
tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self ) -> Any:
_A : str = """<pad>"""
_A : Optional[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def a__ ( self ) -> List[Any]:
_A : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(_a ) , 1004 )
def a__ ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
def a__ ( self ) -> Union[str, Any]:
_A : Optional[int] = CamembertTokenizer(_a )
tokenizer.save_pretrained(self.tmpdirname )
_A : List[Any] = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
_A : int = """I was born in 92000, and this is falsé."""
_A : Any = tokenizer.encode(_a )
_A : int = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
_A : Tuple = tokenizer.encode(_a , add_special_tokens=_a )
_A : Any = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
_A : Optional[int] = tokenizer.convert_ids_to_tokens(_a )
_A : List[str] = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
def a__ ( self ) -> Any:
if not self.test_rust_tokenizer:
return
_A : Optional[int] = self.get_tokenizer()
_A : Dict = self.get_rust_tokenizer()
_A : Any = """I was born in 92000, and this is falsé."""
_A : Any = tokenizer.tokenize(_a )
_A : Tuple = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
_A : Tuple = tokenizer.encode(_a , add_special_tokens=_a )
_A : Tuple = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
_A : List[Any] = self.get_rust_tokenizer()
_A : List[Any] = tokenizer.encode(_a )
_A : Union[str, Any] = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
@slow
def a__ ( self ) -> Tuple:
# fmt: off
_A : Any = {"""input_ids""": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 2_7575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 2_2804, 1_8818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 1_0326, 24, 2267, 20, 416, 5072, 1_5612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
_A : List[str] = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=_a , )
| 26 |
from math import asin, atan, cos, radians, sin, sqrt, tan
_snake_case = 6_3_7_8_1_3_7.0
_snake_case = 6_3_5_6_7_5_2.3_1_4_2_4_5
_snake_case = 6378137
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ):
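    # Haversine great-circle distance (in metres) between two (lat, lon) points,
    # computed on reduced latitudes to partially account for the Earth's flattening.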
_A : Any = (AXIS_A - AXIS_B) / AXIS_A
_A : Optional[int] = atan((1 - flattening) * tan(radians(snake_case_ ) ) )
_A : List[str] = atan((1 - flattening) * tan(radians(snake_case_ ) ) )
_A : Optional[Any] = radians(snake_case_ )
_A : str = radians(snake_case_ )
# Equation
_A : Dict = sin((phi_a - phi_a) / 2 )
_A : List[str] = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
_A : Optional[int] = sqrt(sin_sq_phi + (cos(snake_case_ ) * cos(snake_case_ ) * sin_sq_lambda) )
return 2 * RADIUS * asin(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 | 1 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
_snake_case = logging.getLogger(__name__)
class lowercase :
def __init__( self ) -> Union[str, Any]:
_A : Tuple = False
def a__ ( self , _a , _a , _a , _a ) -> Optional[int]:
if not self.initialized:
_A : Any = RagRetriever(
_a , question_encoder_tokenizer=_a , generator_tokenizer=_a , index=_a , init_retrieval=_a , )
_A : Any = True
def a__ ( self ) -> Dict:
self.retriever.index.init_index()
def a__ ( self , _a , _a ) -> Any:
_A , _A : List[str] = self.retriever._main_retrieve(_a , _a )
return doc_ids, retrieved_doc_embeds
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a , _a , _a , _a , _a=None ) -> Dict:
if index is not None and index.is_initialized() and len(_a ) > 0:
raise ValueError(
"""When using Ray for distributed fine-tuning, """
"""you'll need to provide the paths instead, """
"""as the dataset and the index are loaded """
"""separately. More info in examples/rag/use_own_knowledge_dataset.py """ )
super().__init__(
_a , question_encoder_tokenizer=_a , generator_tokenizer=_a , index=_a , init_retrieval=_a , )
_A : int = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(_a , _a , _a , _a )
for worker in self.retrieval_workers
] )
def a__ ( self ) -> Union[str, Any]:
logger.info("""initializing retrieval""" )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def a__ ( self , _a , _a ) -> List[str]:
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
_A : Optional[Any] = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
_A , _A : List[str] = ray.get(random_worker.retrieve.remote(_a , _a ) )
else:
_A , _A : Optional[int] = self._main_retrieve(_a , _a )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(_a )
@classmethod
def a__ ( cls , _a , _a=None , **_a ) -> List[Any]:
return super(_a , cls ).get_tokenizers(_a , _a , **_a )
@classmethod
def a__ ( cls , _a , _a , _a=None , **_a ) -> Union[str, Any]:
_A : Any = kwargs.pop("""config""" , _a ) or RagConfig.from_pretrained(_a , **_a )
_A : str = RagTokenizer.from_pretrained(_a , config=_a )
_A : Dict = rag_tokenizer.question_encoder
_A : Any = rag_tokenizer.generator
if indexed_dataset is not None:
_A : Optional[int] = """custom"""
_A : Any = CustomHFIndex(config.retrieval_vector_size , _a )
else:
_A : int = cls._build_index(_a )
return cls(
_a , question_encoder_tokenizer=_a , generator_tokenizer=_a , retrieval_workers=_a , index=_a , )
| 26 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",UpperCamelCase__,)
class lowercase ( UpperCamelCase__ ):
_a = RobertaConfig
_a = "roberta"
def __init__( self , _a ) -> Optional[int]:
super().__init__(_a )
_A : Union[str, Any] = RobertaEmbeddings(_a )
self.init_weights()
@add_start_docstrings(
"RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ",UpperCamelCase__,)
class lowercase ( UpperCamelCase__ ):
_a = RobertaConfig
_a = "roberta"
def __init__( self , _a ) -> str:
super().__init__(_a )
_A : Any = config.num_labels
_A : Dict = config.num_hidden_layers
_A : List[str] = DeeRobertaModel(_a )
_A : int = nn.Dropout(config.hidden_dropout_prob )
_A : int = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(_a )
def a__ ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=-1 , _a=False , ) -> Any:
_A : Optional[int] = self.num_layers
try:
_A : List[str] = self.roberta(
_a , attention_mask=_a , token_type_ids=_a , position_ids=_a , head_mask=_a , inputs_embeds=_a , )
_A : List[str] = outputs[1]
_A : List[str] = self.dropout(_a )
_A : Optional[Any] = self.classifier(_a )
_A : List[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_A : List[Any] = e.message
_A : Optional[int] = e.exit_layer
_A : Optional[int] = outputs[0]
if not self.training:
_A : int = entropy(_a )
_A : int = []
_A : int = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_A : Union[str, Any] = MSELoss()
_A : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_A : List[Any] = CrossEntropyLoss()
_A : Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_A : Optional[Any] = []
for highway_exit in outputs[-1]:
_A : Tuple = highway_exit[0]
if not self.training:
highway_logits_all.append(_a )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_A : List[str] = MSELoss()
_A : Optional[int] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_A : List[Any] = CrossEntropyLoss()
_A : Tuple = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_a )
if train_highway:
_A : Dict = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_A : int = (loss,) + outputs
if not self.training:
_A : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_A : Union[str, Any] = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 26 | 1 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
_snake_case = logging.get_logger(__name__)
class lowercase ( UpperCamelCase__ ):
def __init__( self , *_a , **_a ) -> None:
warnings.warn(
"""The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use MobileViTImageProcessor instead.""" , _a , )
super().__init__(*_a , **_a )
| 26 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class lowercase ( UpperCamelCase__ ):
_a = "xmod"
def __init__( self , _a=3_0522 , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1e-12 , _a=1 , _a=0 , _a=2 , _a="absolute" , _a=True , _a=None , _a=False , _a=2 , _a=False , _a=True , _a=True , _a=("en_XX",) , _a=None , **_a , ) -> str:
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
_A : Tuple = vocab_size
_A : Union[str, Any] = hidden_size
_A : Dict = num_hidden_layers
_A : Dict = num_attention_heads
_A : List[Any] = hidden_act
_A : Optional[Any] = intermediate_size
_A : Any = hidden_dropout_prob
_A : str = attention_probs_dropout_prob
_A : Dict = max_position_embeddings
_A : Any = type_vocab_size
_A : List[Any] = initializer_range
_A : int = layer_norm_eps
_A : int = position_embedding_type
_A : Any = use_cache
_A : int = classifier_dropout
_A : int = pre_norm
_A : Optional[Any] = adapter_reduction_factor
_A : List[Any] = adapter_layer_norm
_A : Optional[int] = adapter_reuse_layer_norm
_A : Any = ln_before_adapter
_A : Union[str, Any] = list(_a )
_A : List[Any] = default_language
class lowercase ( UpperCamelCase__ ):
@property
def a__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_A : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_A : Dict = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 26 | 1 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a , _a , _a = None , _a = None , _a = False , **_a , ) -> int:
super().__init__(features=_a , cache_dir=_a , keep_in_memory=_a , **_a )
_A : Optional[int] = Sql(
cache_dir=_a , features=_a , sql=_a , con=_a , **_a , )
def a__ ( self ) -> Optional[Any]:
_A : Tuple = None
_A : int = None
_A : Tuple = None
_A : Union[str, Any] = None
self.builder.download_and_prepare(
download_config=_a , download_mode=_a , verification_mode=_a , base_path=_a , )
# Build dataset for splits
_A : int = self.builder.as_dataset(
split="""train""" , verification_mode=_a , in_memory=self.keep_in_memory )
return dataset
class lowercase :
def __init__( self , _a , _a , _a , _a = None , _a = None , **_a , ) -> Union[str, Any]:
if num_proc is not None and num_proc <= 0:
raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''' )
_A : Dict = dataset
_A : int = name
_A : Union[str, Any] = con
_A : str = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
_A : str = num_proc
_A : Optional[Any] = to_sql_kwargs
def a__ ( self ) -> int:
_A : Any = self.to_sql_kwargs.pop("""sql""" , _a )
_A : List[str] = self.to_sql_kwargs.pop("""con""" , _a )
_A : int = self.to_sql_kwargs.pop("""index""" , _a )
_A : List[str] = self._write(index=_a , **self.to_sql_kwargs )
return written
def a__ ( self , _a ) -> Optional[int]:
_A , _A , _A : List[str] = args
_A : int = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs
_A : str = query_table(
table=self.dataset.data , key=slice(_a , offset + self.batch_size ) , indices=self.dataset._indices , )
_A : Tuple = batch.to_pandas()
_A : Union[str, Any] = df.to_sql(self.name , self.con , index=_a , **_a )
return num_rows or len(_a )
def a__ ( self , _a , **_a ) -> int:
_A : Any = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
_A , _A : Tuple = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _a , _a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
written += num_rows
return written
| 26 |
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
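    # Naive exponential-time recursion: try every length i for the first cut and recurse on the remaining rod of length n - i.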
_enforce_args(snake_case_,snake_case_ )
if n == 0:
return 0
_A : Tuple = float("""-inf""" )
for i in range(1,n + 1 ):
_A : str = max(
snake_case_,prices[i - 1] + naive_cut_rod_recursive(n - i,snake_case_ ) )
return max_revue
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_enforce_args(snake_case_,snake_case_ )
_A : Dict = [float("""-inf""" ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(snake_case_,snake_case_,snake_case_ )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
_A : List[str] = float("""-inf""" )
for i in range(1,n + 1 ):
_A : Optional[Any] = max(
snake_case_,prices[i - 1] + _top_down_cut_rod_recursive(n - i,snake_case_,snake_case_ ),)
_A : Tuple = max_revenue
return max_rev[n]
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
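    # Bottom-up dynamic programming: compute the best obtainable revenue for every rod length from 1 to n in O(n^2) time.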
_enforce_args(snake_case_,snake_case_ )
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
_A : List[Any] = [float("""-inf""" ) for _ in range(n + 1 )]
_A : Any = 0
for i in range(1,n + 1 ):
_A : Optional[Any] = max_rev[i]
for j in range(1,i + 1 ):
_A : int = max(snake_case_,prices[j - 1] + max_rev[i - j] )
_A : int = max_revenue_i
return max_rev[n]
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
if n < 0:
_A : Optional[Any] = f'''n must be greater than or equal to 0. Got n = {n}'''
raise ValueError(snake_case_ )
if n > len(snake_case_ ):
_A : Any = (
"""Each integral piece of rod must have a corresponding price. """
f'''Got n = {n} but length of prices = {len(snake_case_ )}'''
)
raise ValueError(snake_case_ )
def lowerCAmelCase_ ( ):
_A : Tuple = [6, 10, 12, 15, 20, 23]
_A : List[Any] = len(snake_case_ )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
_A : Any = 36
_A : List[Any] = top_down_cut_rod(snake_case_,snake_case_ )
_A : List[Any] = bottom_up_cut_rod(snake_case_,snake_case_ )
_A : Dict = naive_cut_rod_recursive(snake_case_,snake_case_ )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 26 | 1 |
from __future__ import annotations
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
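    # Iterative depth-first search: returns the set of all vertices reachable from the start node.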
_A , _A : Any = set(snake_case_ ), [start]
while stack:
_A : Union[str, Any] = stack.pop()
explored.add(snake_case_ )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
stack.append(snake_case_ )
return explored
_snake_case = {
"A": ["B", "C", "D"],
"B": ["A", "D", "E"],
"C": ["A", "F"],
"D": ["B", "D"],
"E": ["B", "F"],
"F": ["C", "E", "G"],
"G": ["F"],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, "A"))
| 26 |
import requests
from bs4 import BeautifulSoup
def lowerCAmelCase_ ( snake_case_ = "AAPL" ):
_A : str = f'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
_A : List[Any] = BeautifulSoup(requests.get(snake_case_ ).text,"""html.parser""" )
_A : Union[str, Any] = """My(6px) Pos(r) smartphone_Mt(6px)"""
return soup.find("""div""",class_=class_ ).find("""span""" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 26 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER","False" ) ) is not True,reason="Skipping test because should only be run when releasing minor transformers version",)
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> Optional[int]:
if self.framework == "pytorch":
subprocess.run(
F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="""utf-8""" , check=_a , )
assert hasattr(self , """env""" )
def a__ ( self , _a ) -> List[Any]:
# configuration for running training on smdistributed Model Parallel
_A : Optional[Any] = {
"""enabled""": True,
"""processes_per_host""": 8,
}
_A : List[Any] = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
_A : Optional[Any] = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
_A : Any = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=_a , instance_type=self.instance_type , debugger_hook_config=_a , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 500,
} , metric_definitions=self.env.metric_definitions , distribution=_a , py_version="""py36""" , )
def a__ ( self , _a ) -> Optional[int]:
TrainingJobAnalytics(_a ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(1,)] )
def a__ ( self , _a ) -> Union[str, Any]:
# create estimator
_A : str = self.create_estimator(_a )
# run training
estimator.fit()
# result dataframe
_A : Any = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_A : Any = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
_A : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_A : Tuple = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'''{estimator.latest_training_job.name}.json''' , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , _a )
| 26 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class lowercase ( unittest.TestCase ):
_a = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_a = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def a__ ( self , _a , _a , _a ) -> int:
_A : str = TextaTextGenerationPipeline(model=_a , tokenizer=_a )
return generator, ["Something to write", "Something else"]
def a__ ( self , _a , _a ) -> Dict:
_A : Any = generator("""Something there""" )
self.assertEqual(_a , [{"""generated_text""": ANY(_a )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
_A : List[Any] = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_a )
self.assertEqual(
_a , [
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] , )
_A : Optional[int] = generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=_a )
self.assertEqual(
_a , [
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] , )
with self.assertRaises(_a ):
generator(4 )
@require_torch
def a__ ( self ) -> List[str]:
_A : Any = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" )
# do_sample=False necessary for reproducibility
_A : Dict = generator("""Something there""" , do_sample=_a )
self.assertEqual(_a , [{"""generated_text""": """"""}] )
_A : Any = 3
_A : Any = generator(
"""Something there""" , num_return_sequences=_a , num_beams=_a , )
_A : Optional[int] = [
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """"""},
]
self.assertEqual(_a , _a )
_A : Dict = generator("""This is a test""" , do_sample=_a , num_return_sequences=2 , return_tensors=_a )
self.assertEqual(
_a , [
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
] , )
_A : Dict = generator.model.config.eos_token_id
_A : List[str] = """<pad>"""
_A : Dict = generator(
["""This is a test""", """This is a second test"""] , do_sample=_a , num_return_sequences=2 , batch_size=2 , return_tensors=_a , )
self.assertEqual(
_a , [
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
] , )
@require_tf
def a__ ( self ) -> int:
_A : Optional[Any] = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" )
# do_sample=False necessary for reproducibility
_A : str = generator("""Something there""" , do_sample=_a )
self.assertEqual(_a , [{"""generated_text""": """"""}] )
| 26 | 1 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_snake_case = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
"large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def lowerCAmelCase_ ( snake_case_ ):
_A : Union[str, Any] = ["""layers""", """blocks"""]
for k in ignore_keys:
state_dict.pop(snake_case_,snake_case_ )
_snake_case = {
"blocks": "layers",
"mlp.0": "fc1",
"mlp.2": "fc2",
"mlp_ln": "final_layer_norm",
".attn.query": ".self_attn.q_proj",
".attn.key": ".self_attn.k_proj",
".attn.value": ".self_attn.v_proj",
".attn_ln": ".self_attn_layer_norm",
".attn.out": ".self_attn.out_proj",
".cross_attn.query": ".encoder_attn.q_proj",
".cross_attn.key": ".encoder_attn.k_proj",
".cross_attn.value": ".encoder_attn.v_proj",
".cross_attn_ln": ".encoder_attn_layer_norm",
".cross_attn.out": ".encoder_attn.out_proj",
"decoder.ln.": "decoder.layer_norm.",
"encoder.ln.": "encoder.layer_norm.",
"token_embedding": "embed_tokens",
"encoder.positional_embedding": "encoder.embed_positions.weight",
"decoder.positional_embedding": "decoder.embed_positions.weight",
"ln_post": "layer_norm",
}
def lowerCAmelCase_ ( snake_case_ ):
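    # Rename every key of the OpenAI Whisper state dict by applying the WHISPER_MAPPING substring substitutions.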
_A : Tuple = list(s_dict.keys() )
for key in keys:
_A : List[Any] = key
for k, v in WHISPER_MAPPING.items():
if k in key:
_A : Optional[Any] = new_key.replace(snake_case_,snake_case_ )
print(f'''{key} -> {new_key}''' )
_A : List[Any] = s_dict.pop(snake_case_ )
return s_dict
def lowerCAmelCase_ ( snake_case_ ):
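    # Create a bias-free linear layer whose weight matrix is taken from the token embedding,
    # tying the output projection to the embedding weights.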
_A , _A : List[Any] = emb.weight.shape
_A : Union[str, Any] = nn.Linear(snake_case_,snake_case_,bias=snake_case_ )
_A : Dict = emb.weight.data
return lin_layer
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
os.makedirs(snake_case_,exist_ok=snake_case_ )
_A : Dict = os.path.basename(snake_case_ )
_A : Any = url.split("""/""" )[-2]
_A : int = os.path.join(snake_case_,snake_case_ )
if os.path.exists(snake_case_ ) and not os.path.isfile(snake_case_ ):
raise RuntimeError(f'''{download_target} exists and is not a regular file''' )
if os.path.isfile(snake_case_ ):
_A : List[Any] = open(snake_case_,"""rb""" ).read()
        if hashlib.sha256(snake_case_ ).hexdigest() == expected_sha256:
return model_bytes
else:
warnings.warn(f'''{download_target} exists, but the SHA256 checksum does not match; re-downloading the file''' )
with urllib.request.urlopen(snake_case_ ) as source, open(snake_case_,"""wb""" ) as output:
with tqdm(
total=int(source.info().get("""Content-Length""" ) ),ncols=80,unit="""iB""",unit_scale=snake_case_,unit_divisor=1024 ) as loop:
while True:
_A : Dict = source.read(8192 )
if not buffer:
break
output.write(snake_case_ )
loop.update(len(snake_case_ ) )
_A : Any = open(snake_case_,"""rb""" ).read()
    if hashlib.sha256(snake_case_ ).hexdigest() != expected_sha256:
        raise RuntimeError(
            """Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.""" )
return model_bytes
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
if ".pt" not in checkpoint_path:
_A : List[str] = _download(_MODELS[checkpoint_path] )
else:
_A : Any = torch.load(snake_case_,map_location="""cpu""" )
_A : List[Any] = original_checkpoint["""dims"""]
_A : str = original_checkpoint["""model_state_dict"""]
_A : Tuple = state_dict["""decoder.token_embedding.weight"""]
remove_ignore_keys_(snake_case_ )
rename_keys(snake_case_ )
_A : str = True
_A : Optional[Any] = state_dict["""decoder.layers.0.fc1.weight"""].shape[0]
_A : Optional[int] = WhisperConfig(
vocab_size=dimensions["""n_vocab"""],encoder_ffn_dim=snake_case_,decoder_ffn_dim=snake_case_,num_mel_bins=dimensions["""n_mels"""],d_model=dimensions["""n_audio_state"""],max_target_positions=dimensions["""n_text_ctx"""],encoder_layers=dimensions["""n_audio_layer"""],encoder_attention_heads=dimensions["""n_audio_head"""],decoder_layers=dimensions["""n_text_layer"""],decoder_attention_heads=dimensions["""n_text_state"""],max_source_positions=dimensions["""n_audio_ctx"""],)
_A : str = WhisperForConditionalGeneration(snake_case_ )
_A , _A : Union[str, Any] = model.model.load_state_dict(snake_case_,strict=snake_case_ )
if len(snake_case_ ) > 0 and not set(snake_case_ ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"""Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
f''' but all the following weights are missing {missing}''' )
if tie_embeds:
_A : Tuple = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
_A : Tuple = proj_out_weights
model.save_pretrained(snake_case_ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
_snake_case = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 26 |
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
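    # Iterative Euclidean algorithm: repeatedly replace (a, b) with (b, a mod b) until b becomes 0; the remaining a is the GCD.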
while b:
_A , _A : List[str] = b, a % b
return a
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
return a if b == 0 else euclidean_gcd_recursive(snake_case_,a % b )
def lowerCAmelCase_ ( ):
print(f'''euclidean_gcd(3, 5) = {euclidean_gcd(3,5 )}''' )
print(f'''euclidean_gcd(5, 3) = {euclidean_gcd(5,3 )}''' )
print(f'''euclidean_gcd(1, 3) = {euclidean_gcd(1,3 )}''' )
print(f'''euclidean_gcd(3, 6) = {euclidean_gcd(3,6 )}''' )
print(f'''euclidean_gcd(6, 3) = {euclidean_gcd(6,3 )}''' )
print(f'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3,5 )}''' )
print(f'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5,3 )}''' )
print(f'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1,3 )}''' )
print(f'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3,6 )}''' )
print(f'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6,3 )}''' )
if __name__ == "__main__":
main()
| 26 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_snake_case = {
"configuration_roberta_prelayernorm": [
"ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
"RobertaPreLayerNormConfig",
"RobertaPreLayerNormOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaPreLayerNormForCausalLM",
"RobertaPreLayerNormForMaskedLM",
"RobertaPreLayerNormForMultipleChoice",
"RobertaPreLayerNormForQuestionAnswering",
"RobertaPreLayerNormForSequenceClassification",
"RobertaPreLayerNormForTokenClassification",
"RobertaPreLayerNormModel",
"RobertaPreLayerNormPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaPreLayerNormForCausalLM",
"TFRobertaPreLayerNormForMaskedLM",
"TFRobertaPreLayerNormForMultipleChoice",
"TFRobertaPreLayerNormForQuestionAnswering",
"TFRobertaPreLayerNormForSequenceClassification",
"TFRobertaPreLayerNormForTokenClassification",
"TFRobertaPreLayerNormMainLayer",
"TFRobertaPreLayerNormModel",
"TFRobertaPreLayerNormPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"FlaxRobertaPreLayerNormForCausalLM",
"FlaxRobertaPreLayerNormForMaskedLM",
"FlaxRobertaPreLayerNormForMultipleChoice",
"FlaxRobertaPreLayerNormForQuestionAnswering",
"FlaxRobertaPreLayerNormForSequenceClassification",
"FlaxRobertaPreLayerNormForTokenClassification",
"FlaxRobertaPreLayerNormModel",
"FlaxRobertaPreLayerNormPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
def lowerCAmelCase_ ( snake_case_ ):
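    # Bit trick: n & (n - 1) clears the lowest set bit, so the result is 0 only when n is a power of two (or n == 0).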
if number < 0:
raise ValueError("""number must not be negative""" )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 | 1 |
def lowerCAmelCase_ ( snake_case_ ):
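    # Base16 (hex) encode: represent every byte as two uppercase hexadecimal digits.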
return "".join([hex(snake_case_ )[2:].zfill(2 ).upper() for byte in list(snake_case_ )] )
def lowerCAmelCase_ ( snake_case_ ):
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(snake_case_ ) % 2) != 0:
raise ValueError(
"""Base16 encoded data is invalid:
Data does not have an even number of hex digits.""" )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(snake_case_ ) <= set("""0123456789ABCDEF""" ):
raise ValueError(
"""Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.""" )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1],16 ) for i in range(0,len(snake_case_ ),2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def lowerCAmelCase_ ( snake_case_ ):
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
_A : str = k.replace(snake_case_,snake_case_ )
if k.startswith("""encoder""" ):
_A : Optional[Any] = k.replace(""".attn""",""".self_attn""" )
_A : Dict = k.replace("""norm1""","""self_attn_layer_norm""" )
_A : Optional[Any] = k.replace("""norm2""","""final_layer_norm""" )
elif k.startswith("""decoder""" ):
_A : str = k.replace("""norm1""","""self_attn_layer_norm""" )
_A : Any = k.replace("""norm2""","""encoder_attn_layer_norm""" )
_A : Optional[int] = k.replace("""norm3""","""final_layer_norm""" )
return k
def lowerCAmelCase_ ( snake_case_ ):
    keys = [
        """model.encoder.layernorm_embedding.weight""",
        """model.encoder.layernorm_embedding.bias""",
        """model.decoder.layernorm_embedding.weight""",
        """model.decoder.layernorm_embedding.bias""",
    ]
    for k in keys:
        v = snake_case_.pop(k )
        new_k = k.replace("""layernorm_embedding""","""layer_norm""" )
        assert new_k not in snake_case_
        snake_case_[new_k] = v
_snake_case = ["START"]
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Tuple = torch.load(snake_case_,map_location="""cpu""" )
_A : List[Any] = model["""model"""]
_A : Optional[Any] = BlenderbotConfig.from_json_file(snake_case_ )
_A : List[str] = BlenderbotForConditionalGeneration(snake_case_ )
_A : Tuple = m.model.state_dict().keys()
_A : Any = []
_A : Dict = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
_A : Optional[int] = rename_state_dict_key(snake_case_ )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
_A : Dict = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(snake_case_ )
m.model.load_state_dict(snake_case_,strict=snake_case_ )
m.half()
m.save_pretrained(snake_case_ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
_snake_case = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 26 | 1 |
from __future__ import annotations
def lowerCAmelCase_ ( numsa,numsb ):
    all_numbers = sorted(numsa + numsb )
    div , mod = divmod(len(all_numbers ),2 )
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = [float(x) for x in input("Enter the elements of first array: ").split()]
_snake_case = [float(x) for x in input("Enter the elements of second array: ").split()]
print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
| 26 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a , _a , _a = None , _a = None , _a = False , **_a , ) -> int:
super().__init__(features=_a , cache_dir=_a , keep_in_memory=_a , **_a )
_A : Optional[int] = Sql(
cache_dir=_a , features=_a , sql=_a , con=_a , **_a , )
def a__ ( self ) -> Optional[Any]:
_A : Tuple = None
_A : int = None
_A : Tuple = None
_A : Union[str, Any] = None
self.builder.download_and_prepare(
download_config=_a , download_mode=_a , verification_mode=_a , base_path=_a , )
# Build dataset for splits
_A : int = self.builder.as_dataset(
split="""train""" , verification_mode=_a , in_memory=self.keep_in_memory )
return dataset
class lowercase :
def __init__( self , _a , _a , _a , _a = None , _a = None , **_a , ) -> Union[str, Any]:
if num_proc is not None and num_proc <= 0:
raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''' )
_A : Dict = dataset
_A : int = name
_A : Union[str, Any] = con
_A : str = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
_A : str = num_proc
_A : Optional[Any] = to_sql_kwargs
def a__ ( self ) -> int:
_A : Any = self.to_sql_kwargs.pop("""sql""" , _a )
_A : List[str] = self.to_sql_kwargs.pop("""con""" , _a )
_A : int = self.to_sql_kwargs.pop("""index""" , _a )
_A : List[str] = self._write(index=_a , **self.to_sql_kwargs )
return written
def a__ ( self , _a ) -> Optional[int]:
_A , _A , _A : List[str] = args
_A : int = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs
_A : str = query_table(
table=self.dataset.data , key=slice(_a , offset + self.batch_size ) , indices=self.dataset._indices , )
_A : Tuple = batch.to_pandas()
_A : Union[str, Any] = df.to_sql(self.name , self.con , index=_a , **_a )
return num_rows or len(_a )
def a__ ( self , _a , **_a ) -> int:
_A : Any = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
_A , _A : Tuple = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _a , _a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
written += num_rows
return written
| 26 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
"configuration_table_transformer": [
"TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TableTransformerConfig",
"TableTransformerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TableTransformerForObjectDetection",
"TableTransformerModel",
"TableTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class lowercase ( UpperCamelCase__ ):
_a = "fnet"
def __init__( self , _a=3_2000 , _a=768 , _a=12 , _a=3072 , _a="gelu_new" , _a=0.1 , _a=512 , _a=4 , _a=0.02 , _a=1e-12 , _a=False , _a=512 , _a=3 , _a=1 , _a=2 , **_a , ) -> int:
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
_A : Any = vocab_size
_A : str = max_position_embeddings
_A : Optional[Any] = hidden_size
_A : List[str] = num_hidden_layers
_A : List[str] = intermediate_size
_A : List[Any] = hidden_act
_A : List[str] = hidden_dropout_prob
_A : List[str] = initializer_range
_A : List[Any] = type_vocab_size
_A : List[Any] = layer_norm_eps
_A : List[str] = use_tpu_fourier_optimizations
_A : str = tpu_short_seq_length
| 26 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = 1
@register_to_config
def __init__( self , _a=2000 , _a=0.1 , _a=20 , _a=1e-3 ) -> List[Any]:
_A : Dict = None
_A : List[Any] = None
_A : Dict = None
def a__ ( self , _a , _a = None ) -> Union[str, Any]:
_A : Union[str, Any] = torch.linspace(1 , self.config.sampling_eps , _a , device=_a )
def a__ ( self , _a , _a , _a , _a=None ) -> Dict:
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
_A : Any = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
_A : List[Any] = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
_A : List[str] = std.flatten()
while len(std.shape ) < len(score.shape ):
_A : List[Any] = std.unsqueeze(-1 )
_A : int = -score / std
# compute
_A : Tuple = -1.0 / len(self.timesteps )
_A : str = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
_A : List[str] = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
_A : Union[str, Any] = beta_t.unsqueeze(-1 )
_A : Tuple = -0.5 * beta_t * x
_A : Tuple = torch.sqrt(_a )
_A : Dict = drift - diffusion**2 * score
_A : Dict = x + drift * dt
# add noise
_A : Any = randn_tensor(x.shape , layout=x.layout , generator=_a , device=x.device , dtype=x.dtype )
_A : str = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self ) -> Optional[Any]:
return self.config.num_train_timesteps
| 26 |
def lowerCAmelCase_ ( snake_case_ ):
if n_term == "":
return []
_A : list = []
for temp in range(int(snake_case_ ) ):
series.append(f'''1/{temp + 1}''' if series else """1""" )
return series
if __name__ == "__main__":
_snake_case = input("Enter the last number (nth term) of the Harmonic Series")
print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
print(harmonic_series(nth_term))
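    # Hedged usage sketch added for illustration: the first five terms of the series.
    print(lowerCAmelCase_(5))  # ['1', '1/2', '1/3', '1/4', '1/5']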
| 26 | 1 |
_snake_case = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
_snake_case = [{"type": "code", "content": INSTALL_CONTENT}]
_snake_case = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 26 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
_snake_case = logging.get_logger(__name__)
_snake_case = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
_snake_case = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def lowerCAmelCase_ ( snake_case_ ):
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
_A : List[str] = model_type_to_module_name(snake_case_ )
_A : List[Any] = importlib.import_module(f'''.{module_name}''',"""transformers.models""" )
try:
return getattr(snake_case_,snake_case_ )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(snake_case_,"""__name__""",snake_case_ ) == class_name:
return extractor
# We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
_A : List[Any] = importlib.import_module("""transformers""" )
if hasattr(snake_case_,snake_case_ ):
return getattr(snake_case_,snake_case_ )
return None
def lowerCAmelCase_ ( snake_case_,snake_case_ = None,snake_case_ = False,snake_case_ = False,snake_case_ = None,snake_case_ = None,snake_case_ = None,snake_case_ = False,**snake_case_,):
_A : Optional[int] = get_file_from_repo(
snake_case_,snake_case_,cache_dir=snake_case_,force_download=snake_case_,resume_download=snake_case_,proxies=snake_case_,use_auth_token=snake_case_,revision=snake_case_,local_files_only=snake_case_,)
if resolved_config_file is None:
logger.info(
"""Could not locate the feature extractor configuration file, will try to use the model config instead.""" )
return {}
with open(snake_case_,encoding="""utf-8""" ) as reader:
return json.load(snake_case_ )
class lowercase :
def __init__( self ) -> List[Any]:
raise EnvironmentError(
"""AutoFeatureExtractor is designed to be instantiated """
"""using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" )
@classmethod
@replace_list_option_in_docstrings(_a )
def a__ ( cls , _a , **_a ) -> Any:
_A : Tuple = kwargs.pop("""config""" , _a )
_A : Tuple = kwargs.pop("""trust_remote_code""" , _a )
_A : List[Any] = True
_A , _A : Tuple = FeatureExtractionMixin.get_feature_extractor_dict(_a , **_a )
_A : Tuple = config_dict.get("""feature_extractor_type""" , _a )
_A : int = None
if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ):
_A : Optional[int] = config_dict["""auto_map"""]["""AutoFeatureExtractor"""]
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(_a , _a ):
_A : int = AutoConfig.from_pretrained(_a , **_a )
# It could be in `config.feature_extractor_type``
_A : Optional[int] = getattr(_a , """feature_extractor_type""" , _a )
if hasattr(_a , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map:
_A : Tuple = config.auto_map["""AutoFeatureExtractor"""]
if feature_extractor_class is not None:
_A : Optional[Any] = feature_extractor_class_from_name(_a )
_A : List[Any] = feature_extractor_auto_map is not None
_A : Union[str, Any] = feature_extractor_class is not None or type(_a ) in FEATURE_EXTRACTOR_MAPPING
_A : Optional[int] = resolve_trust_remote_code(
_a , _a , _a , _a )
if has_remote_code and trust_remote_code:
_A : Dict = get_class_from_dynamic_module(
_a , _a , **_a )
_A : str = kwargs.pop("""code_revision""" , _a )
if os.path.isdir(_a ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(_a , **_a )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(_a , **_a )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(_a ) in FEATURE_EXTRACTOR_MAPPING:
_A : Dict = FEATURE_EXTRACTOR_MAPPING[type(_a )]
return feature_extractor_class.from_dict(_a , **_a )
raise ValueError(
F'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '''
F'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '''
F'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def a__ ( _a , _a ) -> Optional[int]:
FEATURE_EXTRACTOR_MAPPING.register(_a , _a )
| 26 | 1 |
def lowerCAmelCase_ ( discount_rate,cash_flows ):
    if discount_rate < 0:
        raise ValueError("""Discount rate cannot be negative""" )
    if not cash_flows:
        raise ValueError("""Cash flows list cannot be empty""" )
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows ) )
    return round(present_value,ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
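    # Hedged usage sketch added for illustration: net present value of -1000 today
    # followed by 500 per year for three years at a 10% discount rate,
    # i.e. -1000 + 500/1.1 + 500/1.1**2 + 500/1.1**3 rounded to 2 decimals.
    print(lowerCAmelCase_(0.1, [-1000, 500, 500, 500]))  # 243.43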
| 26 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=False , _a=True , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , ) -> Dict:
_A : str = parent
_A : int = batch_size
_A : Optional[int] = num_channels
_A : List[Any] = image_size
_A : int = min_resolution
_A : Optional[int] = max_resolution
_A : Any = do_resize
_A : List[str] = size if size is not None else {"""height""": 18, """width""": 20}
_A : Optional[int] = do_thumbnail
_A : str = do_align_axis
_A : List[Any] = do_pad
_A : Optional[Any] = do_normalize
_A : Tuple = image_mean
_A : List[str] = image_std
def a__ ( self ) -> Optional[int]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = DonutImageProcessor if is_vision_available() else None
def a__ ( self ) -> Optional[int]:
_A : List[str] = DonutImageProcessingTester(self )
@property
def a__ ( self ) -> List[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> Optional[Any]:
_A : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , """do_resize""" ) )
self.assertTrue(hasattr(_a , """size""" ) )
self.assertTrue(hasattr(_a , """do_thumbnail""" ) )
self.assertTrue(hasattr(_a , """do_align_long_axis""" ) )
self.assertTrue(hasattr(_a , """do_pad""" ) )
self.assertTrue(hasattr(_a , """do_normalize""" ) )
self.assertTrue(hasattr(_a , """image_mean""" ) )
self.assertTrue(hasattr(_a , """image_std""" ) )
def a__ ( self ) -> List[Any]:
_A : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 20} )
_A : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
# Previous config had dimensions in (width, height) order
_A : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {"""height""": 84, """width""": 42} )
def a__ ( self ) -> Union[str, Any]:
pass
@is_flaky()
def a__ ( self ) -> Optional[int]:
# Initialize image_processing
_A : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : Any = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
@is_flaky()
def a__ ( self ) -> Dict:
# Initialize image_processing
_A : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
_A : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : List[str] = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
@is_flaky()
def a__ ( self ) -> Optional[int]:
# Initialize image_processing
_A : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_A : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : str = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 26 | 1 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class lowercase ( UpperCamelCase__ ):
_a = 42
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = True
@register_to_config
def __init__( self , _a = 3 , _a = 3 , _a = ("DownEncoderBlock2D",) , _a = ("UpDecoderBlock2D",) , _a = (64,) , _a = 1 , _a = "silu" , _a = 4 , _a = 32 , _a = 32 , _a = 0.18215 , ) -> Any:
super().__init__()
# pass init params to Encoder
_A : Tuple = Encoder(
in_channels=_a , out_channels=_a , down_block_types=_a , block_out_channels=_a , layers_per_block=_a , act_fn=_a , norm_num_groups=_a , double_z=_a , )
# pass init params to Decoder
_A : Union[str, Any] = Decoder(
in_channels=_a , out_channels=_a , up_block_types=_a , block_out_channels=_a , layers_per_block=_a , norm_num_groups=_a , act_fn=_a , )
_A : Any = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
_A : List[str] = nn.Convad(_a , _a , 1 )
_A : List[Any] = False
_A : Any = False
# only relevant if vae tiling is enabled
_A : Any = self.config.sample_size
_A : Union[str, Any] = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
_A : List[str] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
_A : int = 0.25
def a__ ( self , _a , _a=False ) -> str:
if isinstance(_a , (Encoder, Decoder) ):
_A : Dict = value
def a__ ( self , _a = True ) -> int:
_A : int = use_tiling
def a__ ( self ) -> Dict:
self.enable_tiling(_a )
def a__ ( self ) -> Optional[int]:
_A : Tuple = True
def a__ ( self ) -> List[str]:
_A : List[Any] = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def a__ ( self ) -> Dict[str, AttentionProcessor]:
_A : Dict = {}
def fn_recursive_add_processors(_a , _a , _a ):
if hasattr(_a , """set_processor""" ):
_A : Optional[int] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'''{name}.{sub_name}''' , _a , _a )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_a , _a , _a )
return processors
def a__ ( self , _a ) -> str:
_A : int = len(self.attn_processors.keys() )
if isinstance(_a , _a ) and len(_a ) != count:
raise ValueError(
F'''A dict of processors was passed, but the number of processors {len(_a )} does not match the'''
F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(_a , _a , _a ):
if hasattr(_a , """set_processor""" ):
if not isinstance(_a , _a ):
module.set_processor(_a )
else:
module.set_processor(processor.pop(F'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'''{name}.{sub_name}''' , _a , _a )
for name, module in self.named_children():
fn_recursive_attn_processor(_a , _a , _a )
def a__ ( self ) -> Dict:
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def a__ ( self , _a , _a = True ) -> AutoencoderKLOutput:
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_a , return_dict=_a )
if self.use_slicing and x.shape[0] > 1:
_A : List[Any] = [self.encoder(_a ) for x_slice in x.split(1 )]
_A : Dict = torch.cat(_a )
else:
_A : Any = self.encoder(_a )
_A : Tuple = self.quant_conv(_a )
_A : Any = DiagonalGaussianDistribution(_a )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_a )
def a__ ( self , _a , _a = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_a , return_dict=_a )
_A : Union[str, Any] = self.post_quant_conv(_a )
_A : int = self.decoder(_a )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_a )
@apply_forward_hook
def a__ ( self , _a , _a = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_slicing and z.shape[0] > 1:
_A : Dict = [self._decode(_a ).sample for z_slice in z.split(1 )]
_A : str = torch.cat(_a )
else:
_A : Optional[int] = self._decode(_a ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_a )
def a__ ( self , _a , _a , _a ) -> Any:
_A : int = min(a.shape[2] , b.shape[2] , _a )
for y in range(_a ):
_A : List[Any] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def a__ ( self , _a , _a , _a ) -> Tuple:
_A : Union[str, Any] = min(a.shape[3] , b.shape[3] , _a )
for x in range(_a ):
_A : int = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def a__ ( self , _a , _a = True ) -> AutoencoderKLOutput:
_A : Tuple = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
_A : List[Any] = int(self.tile_latent_min_size * self.tile_overlap_factor )
_A : Union[str, Any] = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
_A : str = []
for i in range(0 , x.shape[2] , _a ):
_A : Dict = []
for j in range(0 , x.shape[3] , _a ):
_A : Optional[int] = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
_A : Any = self.encoder(_a )
_A : List[str] = self.quant_conv(_a )
row.append(_a )
rows.append(_a )
_A : List[str] = []
for i, row in enumerate(_a ):
_A : Any = []
for j, tile in enumerate(_a ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
_A : str = self.blend_v(rows[i - 1][j] , _a , _a )
if j > 0:
_A : Tuple = self.blend_h(row[j - 1] , _a , _a )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_a , dim=3 ) )
_A : Tuple = torch.cat(_a , dim=2 )
_A : Union[str, Any] = DiagonalGaussianDistribution(_a )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_a )
def a__ ( self , _a , _a = True ) -> Union[DecoderOutput, torch.FloatTensor]:
_A : List[str] = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
_A : List[str] = int(self.tile_sample_min_size * self.tile_overlap_factor )
_A : Dict = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
_A : Optional[Any] = []
for i in range(0 , z.shape[2] , _a ):
_A : List[str] = []
for j in range(0 , z.shape[3] , _a ):
_A : Any = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
_A : List[str] = self.post_quant_conv(_a )
_A : List[Any] = self.decoder(_a )
row.append(_a )
rows.append(_a )
_A : Dict = []
for i, row in enumerate(_a ):
_A : str = []
for j, tile in enumerate(_a ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
_A : Optional[Any] = self.blend_v(rows[i - 1][j] , _a , _a )
if j > 0:
_A : Tuple = self.blend_h(row[j - 1] , _a , _a )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_a , dim=3 ) )
_A : Optional[Any] = torch.cat(_a , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_a )
def a__ ( self , _a , _a = False , _a = True , _a = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
_A : Tuple = sample
_A : Union[str, Any] = self.encode(_a ).latent_dist
if sample_posterior:
_A : str = posterior.sample(generator=_a )
else:
_A : Any = posterior.mode()
_A : str = self.decode(_a ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_a )
| 26 |
from __future__ import annotations
import numpy as np
def lowerCAmelCase_ ( snake_case_ ):
return np.maximum(0,snake_case_ )
if __name__ == "__main__":
    print(np.array(lowerCAmelCase_([-1, 0, 5]))) # --> [0, 0, 5]
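    # Hedged usage sketch added for illustration: relu is applied elementwise,
    # so it also works on 2-D arrays.
    print(lowerCAmelCase_(np.array([[-3.0, 2.0], [0.5, -0.1]])))  # [[0. 2.] [0.5 0.]]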
| 26 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class lowercase ( UpperCamelCase__ ):
_a = "poolformer"
def __init__( self , _a=3 , _a=16 , _a=16 , _a=3 , _a=4.0 , _a=[2, 2, 6, 2] , _a=[64, 128, 320, 512] , _a=[7, 3, 3, 3] , _a=[4, 2, 2, 2] , _a=[2, 1, 1, 1] , _a=4 , _a=0.0 , _a="gelu" , _a=True , _a=1e-5 , _a=0.02 , **_a , ) -> List[str]:
_A : List[str] = num_channels
_A : List[Any] = patch_size
_A : List[str] = stride
_A : int = padding
_A : Optional[Any] = pool_size
_A : str = hidden_sizes
_A : int = mlp_ratio
_A : Optional[int] = depths
_A : Dict = patch_sizes
_A : List[Any] = strides
_A : Tuple = num_encoder_blocks
_A : Dict = drop_path_rate
_A : Dict = hidden_act
_A : str = use_layer_scale
_A : List[str] = layer_scale_init_value
_A : Dict = initializer_range
super().__init__(**_a )
class lowercase ( UpperCamelCase__ ):
_a = version.parse("1.11" )
@property
def a__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def a__ ( self ) -> float:
return 2e-3
| 26 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
_snake_case = getLogger(__name__)
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ = 8,snake_case_ = 1024,snake_case_="val",snake_case_=None,snake_case_=False,snake_case_="summarization",snake_case_=None,snake_case_=1,snake_case_ = None,snake_case_="",**snake_case_,):
_A : Dict = str(snake_case_ )
assert local_rank is not None
torch.distributed.init_process_group(backend="""nccl""",rank=snake_case_ )
_A : Tuple = Path(snake_case_ )
_A : List[Any] = save_dir.joinpath(f'''rank_{local_rank}_output.json''' )
torch.cuda.set_device(snake_case_ )
_A : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(snake_case_ ).cuda()
if fpaa:
_A : Any = model.half()
# determine if we need to increase num_beams
use_task_specific_params(snake_case_,snake_case_ ) # update config with task specific params
_A : str = generate_kwargs.pop("""num_beams""",model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
_A : int = num_return_sequences
_A : Optional[Any] = AutoTokenizer.from_pretrained(snake_case_ )
logger.info(f'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type.
if max_source_length is None:
_A : Optional[int] = tokenizer.model_max_length
if prefix is None:
_A : Tuple = prefix or getattr(model.config,"""prefix""","""""" ) or """"""
_A : Optional[int] = SeqaSeqDataset(
snake_case_,snake_case_,snake_case_,max_target_length=1024,type_path=snake_case_,n_obs=snake_case_,prefix=snake_case_,**snake_case_,)
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
_A : Optional[int] = ds.make_sortish_sampler(snake_case_,distributed=snake_case_,add_extra_examples=snake_case_,shuffle=snake_case_ )
_A : Dict = DataLoader(snake_case_,sampler=snake_case_,batch_size=snake_case_,collate_fn=ds.collate_fn )
_A : Optional[Any] = []
for batch in tqdm(snake_case_ ):
_A : Tuple = model.generate(
input_ids=batch["""input_ids"""].to(model.device ),attention_mask=batch["""attention_mask"""].to(model.device ),num_return_sequences=snake_case_,num_beams=snake_case_,**snake_case_,)
_A : Any = tokenizer.batch_decode(snake_case_,skip_special_tokens=snake_case_,clean_up_tokenization_spaces=snake_case_ )
_A : Dict = batch["""ids"""]
if num_return_sequences > 1:
_A : Any = chunks(snake_case_,snake_case_ ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(snake_case_ ):
results.append({"""pred""": pred, """id""": ids[i].item()} )
save_json(snake_case_,snake_case_ )
return results, sampler.num_replicas
def lowerCAmelCase_ ( ):
_A : Tuple = argparse.ArgumentParser(
epilog="""Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate""" )
parser.add_argument("""--data_dir""",type=snake_case_,help="""like cnn_dm/test.source""" )
parser.add_argument(
"""--model_name""",type=snake_case_,help="""like facebook/bart-large-cnn,t5-base, etc.""",default="""sshleifer/distilbart-xsum-12-3""",)
parser.add_argument("""--save_dir""",type=snake_case_,help="""where to save""",default="""tmp_gen""" )
parser.add_argument("""--max_source_length""",type=snake_case_,default=snake_case_ )
parser.add_argument(
"""--type_path""",type=snake_case_,default="""test""",help="""which subset to evaluate typically train/val/test""" )
parser.add_argument("""--task""",type=snake_case_,default="""summarization""",help="""used for task_specific_params + metrics""" )
parser.add_argument("""--bs""",type=snake_case_,default=8,required=snake_case_,help="""batch size""" )
parser.add_argument(
"""--local_rank""",type=snake_case_,default=-1,required=snake_case_,help="""should be passed by distributed.launch""" )
parser.add_argument(
"""--n_obs""",type=snake_case_,default=snake_case_,required=snake_case_,help="""How many observations. Defaults to all.""" )
parser.add_argument(
"""--num_return_sequences""",type=snake_case_,default=1,required=snake_case_,help="""How many sequences to return""" )
parser.add_argument(
"""--sync_timeout""",type=snake_case_,default=600,required=snake_case_,help="""How long should master process wait for other processes to finish.""",)
parser.add_argument("""--src_lang""",type=snake_case_,default=snake_case_,required=snake_case_ )
parser.add_argument("""--tgt_lang""",type=snake_case_,default=snake_case_,required=snake_case_ )
parser.add_argument(
"""--prefix""",type=snake_case_,required=snake_case_,default=snake_case_,help="""will be added to the begininng of src examples""" )
parser.add_argument("""--fp16""",action="""store_true""" )
parser.add_argument("""--debug""",action="""store_true""" )
_A : Union[str, Any] = time.time()
_A , _A : List[str] = parser.parse_known_args()
_A : List[str] = parse_numeric_n_bool_cl_kwargs(snake_case_ )
if generate_kwargs and args.local_rank <= 0:
print(f'''parsed the following generate kwargs: {generate_kwargs}''' )
_A : Dict = Path(args.save_dir + """_tmp""" )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) # this handles locking.
_A : int = list(json_save_dir.glob("""rank_*.json""" ) )
if intermediate_files:
raise ValueError(f'''Found files at {json_save_dir} please move or remove them.''' )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
_A : Any = {}
if args.src_lang is not None:
_A : int = args.src_lang
if args.tgt_lang is not None:
_A : Dict = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=snake_case_ )
_A , _A : str = eval_data_dir(
args.data_dir,snake_case_,args.model_name,type_path=args.type_path,bs=args.bs,fpaa=args.fpaa,task=args.task,local_rank=args.local_rank,n_obs=args.n_obs,max_source_length=args.max_source_length,num_return_sequences=args.num_return_sequences,prefix=args.prefix,dataset_kwargs=snake_case_,**snake_case_,)
if args.local_rank <= 0:
_A : List[Any] = Path(args.save_dir )
save_dir.mkdir(exist_ok=snake_case_ )
_A : Tuple = gather_results_from_each_node(snake_case_,snake_case_,args.sync_timeout )
_A : Optional[int] = combine_partial_results(snake_case_ )
if args.num_return_sequences > 1:
_A : Optional[Any] = save_dir.joinpath("""pseudolabel_results.json""" )
print(f'''Saving aggregated results at {save_path}, intermediate in {json_save_dir}/''' )
save_json(snake_case_,snake_case_ )
return
_A : List[str] = Path(args.data_dir ).joinpath(args.type_path + """.target""" )
with open(snake_case_ ) as f:
_A : int = [x.rstrip() for x in f.readlines()][: len(snake_case_ )]
# Calculate metrics, save metrics, and save _generations.txt
_A : Dict = """translation""" in args.task
_A : Optional[Any] = calculate_bleu if calc_bleu else calculate_rouge
_A : Tuple = """bleu""" if calc_bleu else """rouge"""
_A : Dict = score_fn(snake_case_,snake_case_ )
_A : List[Any] = len(snake_case_ )
_A : Optional[int] = time.time() - start_time
_A : Dict = round(runtime / metrics["""n_obs"""],4 )
_A : Dict = num_replicas
# TODO(@stas00): add whatever metadata to metrics
_A : Any = save_dir.joinpath(f'''{args.type_path}_{metric_name}.json''' )
save_json(snake_case_,snake_case_,indent=snake_case_ )
print(snake_case_ )
write_txt_file(snake_case_,save_dir.joinpath(f'''{args.type_path}_generations.txt''' ) )
if args.debug:
write_txt_file(snake_case_,save_dir.joinpath(f'''{args.type_path}.target''' ) )
else:
shutil.rmtree(snake_case_ )
def lowerCAmelCase_ ( snake_case_ ):
_A : Dict = []
for partial_result in partial_results:
records.extend(snake_case_ )
_A : Optional[Any] = sorted(snake_case_,key=lambda snake_case_ : x["id"] )
_A : List[str] = [x["""pred"""] for x in records]
return preds
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
# WAIT FOR lots of .json files
_A : Optional[Any] = time.time()
logger.info("""waiting for all nodes to finish""" )
_A : List[str] = None
while (time.time() - start_wait) < timeout:
_A : str = list(save_dir.glob("""rank_*.json""" ) )
if len(snake_case_ ) < num_replicas:
continue
try:
# make sure all json files are fully saved
_A : List[str] = lmap(snake_case_,snake_case_ )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError("""Rank 0 gave up on waiting for other processes""" )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 26 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=13 , _a=3 , _a=224 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , ) -> Tuple:
_A : Dict = size if size is not None else {"""height""": 18, """width""": 18}
_A : Tuple = parent
_A : List[Any] = batch_size
_A : Any = num_channels
_A : Dict = image_size
_A : str = min_resolution
_A : int = max_resolution
_A : Union[str, Any] = do_resize
_A : Any = size
_A : Tuple = do_normalize
_A : int = image_mean
_A : Optional[int] = image_std
def a__ ( self ) -> str:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = ViTImageProcessor if is_vision_available() else None
def a__ ( self ) -> Optional[Any]:
_A : Optional[int] = EfficientFormerImageProcessorTester(self )
@property
def a__ ( self ) -> List[Any]:
return self.image_proc_tester.prepare_image_processor_dict()
def a__ ( self ) -> Union[str, Any]:
_A : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , """image_mean""" ) )
self.assertTrue(hasattr(_a , """image_std""" ) )
self.assertTrue(hasattr(_a , """do_normalize""" ) )
self.assertTrue(hasattr(_a , """do_resize""" ) )
self.assertTrue(hasattr(_a , """size""" ) )
def a__ ( self ) -> Tuple:
pass
def a__ ( self ) -> Tuple:
# Initialize image_processor
_A : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A : Optional[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_A : List[Any] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
_A : List[str] = image_processor(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def a__ ( self ) -> Union[str, Any]:
# Initialize image_processor
_A : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A : Dict = prepare_image_inputs(self.image_proc_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
_A : Union[str, Any] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
_A : Dict = image_processor(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def a__ ( self ) -> int:
# Initialize image_processor
_A : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A : str = prepare_image_inputs(self.image_proc_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_A : int = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
_A : Optional[Any] = image_processor(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
| 26 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
@slow
def a__ ( self ) -> Any:
_A : Tuple = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
_A : List[Any] = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !"
_A : List[str] = model(_a )["""last_hidden_state"""]
_A : Union[str, Any] = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , _a )
# compare the actual values for a slice.
_A : List[Any] = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 26 | 1 |
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
_snake_case = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
_snake_case = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def lowerCAmelCase_ ( ):
_A : int = calculate_rouge(snake_case_,snake_case_,bootstrap_aggregation=snake_case_,rouge_keys=["""rouge2""", """rougeL"""] )
assert isinstance(snake_case_,snake_case_ )
_A : List[str] = calculate_rouge(snake_case_,snake_case_,bootstrap_aggregation=snake_case_,rouge_keys=["""rouge2"""] )
assert (
pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean()
)
def lowerCAmelCase_ ( ):
_A : Any = """rougeLsum"""
_A : List[str] = calculate_rouge(snake_case_,snake_case_,newline_sep=snake_case_,rouge_keys=[k] )[k]
_A : List[Any] = calculate_rouge(snake_case_,snake_case_,newline_sep=snake_case_,rouge_keys=[k] )[k]
assert score > score_no_sep
def lowerCAmelCase_ ( ):
_A : Dict = ["""rouge1""", """rouge2""", """rougeL"""]
_A : Dict = calculate_rouge(snake_case_,snake_case_,newline_sep=snake_case_,rouge_keys=snake_case_ )
_A : List[Any] = calculate_rouge(snake_case_,snake_case_,newline_sep=snake_case_,rouge_keys=snake_case_ )
assert score_sep == score_no_sep
def lowerCAmelCase_ ( ):
_A : Union[str, Any] = [
"""Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
]
_A : int = [
"""Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
""" the final seconds on board Flight 9525.""",
]
assert calculate_rouge(snake_case_,snake_case_,newline_sep=snake_case_ ) == calculate_rouge(snake_case_,snake_case_,newline_sep=snake_case_ )
def lowerCAmelCase_ ( ):
_A : int = [
"""\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
]
_A : Any = [
""" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
]
_A : Dict = calculate_rouge(snake_case_,snake_case_,rouge_keys=["""rougeLsum"""],newline_sep=snake_case_ )["""rougeLsum"""]
_A : List[str] = calculate_rouge(snake_case_,snake_case_,rouge_keys=["""rougeLsum"""] )["""rougeLsum"""]
assert new_score > prev_score
def lowerCAmelCase_ ( ):
_A : int = Path("""examples/seq2seq/test_data/wmt_en_ro""" )
_A : Optional[Any] = calculate_rouge_path(data_dir.joinpath("""test.source""" ),data_dir.joinpath("""test.target""" ) )
assert isinstance(snake_case_,snake_case_ )
_A : Dict = calculate_rouge_path(
data_dir.joinpath("""test.source""" ),data_dir.joinpath("""test.target""" ),bootstrap_aggregation=snake_case_ )
assert isinstance(snake_case_,snake_case_ )
| 26 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
_snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
_snake_case = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class lowercase ( UpperCamelCase__ ):
_a = 42
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a , _a , _a , _a , _a , ) -> List[Any]:
super().__init__()
self.register_modules(
prior=_a , image_encoder=_a , image_processor=_a , scheduler=_a , renderer=_a , )
def a__ ( self , _a , _a , _a , _a , _a , _a ) -> str:
if latents is None:
_A : str = randn_tensor(_a , generator=_a , device=_a , dtype=_a )
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
_A : Union[str, Any] = latents.to(_a )
_A : int = latents * scheduler.init_noise_sigma
return latents
def a__ ( self , _a=0 ) -> Optional[Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_A : str = torch.device(F'''cuda:{gpu_id}''' )
_A : Any = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_a , _a )
@property
def a__ ( self ) -> List[Any]:
if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(_a , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def a__ ( self , _a , _a , _a , _a , ) -> Tuple:
if isinstance(_a , _a ) and isinstance(image[0] , torch.Tensor ):
_A : int = torch.cat(_a , axis=0 ) if image[0].ndim == 4 else torch.stack(_a , axis=0 )
if not isinstance(_a , torch.Tensor ):
_A : Dict = self.image_processor(_a , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 )
_A : int = image.to(dtype=self.image_encoder.dtype , device=_a )
_A : List[Any] = self.image_encoder(_a )["""last_hidden_state"""]
_A : List[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_A : Dict = image_embeds.repeat_interleave(_a , dim=0 )
if do_classifier_free_guidance:
_A : str = torch.zeros_like(_a )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_A : List[str] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(_a )
def __call__( self , _a , _a = 1 , _a = 25 , _a = None , _a = None , _a = 4.0 , _a = 64 , _a = "pil" , _a = True , ) -> Union[str, Any]:
if isinstance(_a , PIL.Image.Image ):
_A : List[Any] = 1
elif isinstance(_a , torch.Tensor ):
_A : Any = image.shape[0]
elif isinstance(_a , _a ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
_A : Union[str, Any] = len(_a )
else:
raise ValueError(
F'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_a )}''' )
_A : Optional[int] = self._execution_device
_A : Tuple = batch_size * num_images_per_prompt
_A : List[Any] = guidance_scale > 1.0
_A : Optional[Any] = self._encode_image(_a , _a , _a , _a )
# prior
self.scheduler.set_timesteps(_a , device=_a )
_A : Optional[int] = self.scheduler.timesteps
_A : List[str] = self.prior.config.num_embeddings
_A : int = self.prior.config.embedding_dim
_A : Optional[Any] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _a , _a , _a , self.scheduler , )
        # YiYi notes: for testing only to match ldm, we can directly create latents with the desired shape: batch_size, num_embeddings, embedding_dim
_A : List[Any] = latents.reshape(latents.shape[0] , _a , _a )
for i, t in enumerate(self.progress_bar(_a ) ):
# expand the latents if we are doing classifier free guidance
_A : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_A : int = self.scheduler.scale_model_input(_a , _a )
_A : Tuple = self.prior(
_a , timestep=_a , proj_embedding=_a , ).predicted_image_embedding
# remove the variance
_A , _A : Optional[Any] = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
_A , _A : Dict = noise_pred.chunk(2 )
_A : Tuple = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
_A : int = self.scheduler.step(
_a , timestep=_a , sample=_a , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=_a )
_A : List[str] = []
for i, latent in enumerate(_a ):
_A : List[str] = self.renderer.decode(
latent[None, :] , _a , size=_a , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(_a )
_A : List[Any] = torch.stack(_a )
if output_type not in ["np", "pil"]:
            raise ValueError(F'''Only the output types `pil` and `np` are supported, not output_type={output_type}''' )
_A : List[str] = images.cpu().numpy()
if output_type == "pil":
_A : List[Any] = [self.numpy_to_pil(_a ) for image in images]
# Offload last model to CPU
if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=_a )
| 26 | 1 |
def lowerCAmelCase_ ( snake_case_ = 600851475143 ):
try:
_A : List[str] = int(snake_case_ )
except (TypeError, ValueError):
raise TypeError("""Parameter n must be int or castable to int.""" )
if n <= 0:
raise ValueError("""Parameter n must be greater than or equal to one.""" )
_A : Optional[Any] = 2
_A : str = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
_A : List[Any] = i
while n % i == 0:
_A : Union[str, Any] = n // i
i += 1
return int(snake_case_ )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 26 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def lowerCAmelCase_ ( snake_case_ ):
print("""Loading config file...""" )
def flatten_yaml_as_dict(snake_case_,snake_case_="",snake_case_="." ):
_A : Union[str, Any] = []
for k, v in d.items():
_A : Optional[int] = parent_key + sep + k if parent_key else k
if isinstance(snake_case_,collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(snake_case_,snake_case_,sep=snake_case_ ).items() )
else:
items.append((new_key, v) )
return dict(snake_case_ )
_A : List[Any] = argparse.Namespace()
with open(snake_case_,"""r""" ) as yaml_file:
try:
_A : List[Any] = yaml.load(snake_case_,Loader=yaml.FullLoader )
_A : Optional[int] = flatten_yaml_as_dict(snake_case_ )
for k, v in flat_cfg.items():
setattr(snake_case_,snake_case_,snake_case_ )
except yaml.YAMLError as exc:
logger.error("""Error while loading config file: {}. Error message: {}""".format(snake_case_,str(snake_case_ ) ) )
return config
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Optional[Any] = MobileViTVaConfig()
_A : Tuple = False
# dataset
if task_name.startswith("""imagenet1k_""" ):
_A : Dict = 1000
if int(task_name.strip().split("""_""" )[-1] ) == 384:
_A : int = 384
else:
_A : int = 256
_A : List[str] = """imagenet-1k-id2label.json"""
elif task_name.startswith("""imagenet21k_to_1k_""" ):
_A : Union[str, Any] = 21000
if int(task_name.strip().split("""_""" )[-1] ) == 384:
_A : str = 384
else:
_A : List[Any] = 256
_A : List[str] = """imagenet-22k-id2label.json"""
elif task_name.startswith("""ade20k_""" ):
_A : int = 151
_A : int = 512
_A : Optional[int] = """ade20k-id2label.json"""
_A : Any = True
elif task_name.startswith("""voc_""" ):
_A : List[Any] = 21
_A : Dict = 512
_A : Dict = """pascal-voc-id2label.json"""
_A : int = True
# orig_config
_A : Any = load_orig_config_file(snake_case_ )
assert getattr(snake_case_,"""model.classification.name""",-1 ) == "mobilevit_v2", "Invalid model"
_A : List[Any] = getattr(snake_case_,"""model.classification.mitv2.width_multiplier""",1.0 )
assert (
getattr(snake_case_,"""model.classification.mitv2.attn_norm_layer""",-1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
_A : str = getattr(snake_case_,"""model.classification.activation.name""","""swish""" )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
_A : Optional[int] = getattr(snake_case_,"""model.segmentation.output_stride""",16 )
if "_deeplabv3" in task_name:
_A : int = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_rates""",[12, 24, 36] )
_A : int = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_out_channels""",512 )
_A : str = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_dropout""",0.1 )
# id2label
_A : List[Any] = """huggingface/label-files"""
_A : List[Any] = json.load(open(hf_hub_download(snake_case_,snake_case_,repo_type="""dataset""" ),"""r""" ) )
_A : str = {int(snake_case_ ): v for k, v in idalabel.items()}
_A : str = idalabel
_A : Dict = {v: k for k, v in idalabel.items()}
return config
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Any = dct.pop(snake_case_ )
_A : Union[str, Any] = val
def lowerCAmelCase_ ( snake_case_,snake_case_=False ):
if base_model:
_A : Optional[int] = """"""
else:
_A : Dict = """mobilevitv2."""
_A : int = []
for k in state_dict.keys():
if k[:8] == "encoder.":
_A : Any = k[8:]
else:
_A : List[str] = k
if ".block." in k:
_A : Any = k_new.replace(""".block.""",""".""" )
if ".conv." in k:
_A : List[Any] = k_new.replace(""".conv.""",""".convolution.""" )
if ".norm." in k:
_A : Any = k_new.replace(""".norm.""",""".normalization.""" )
if "conv_1." in k:
_A : int = k_new.replace("""conv_1.""",f'''{model_prefix}conv_stem.''' )
for i in [1, 2]:
if f'''layer_{i}.''' in k:
_A : Optional[Any] = k_new.replace(f'''layer_{i}.''',f'''{model_prefix}encoder.layer.{i-1}.layer.''' )
if ".exp_1x1." in k:
_A : Tuple = k_new.replace(""".exp_1x1.""",""".expand_1x1.""" )
if ".red_1x1." in k:
_A : Optional[int] = k_new.replace(""".red_1x1.""",""".reduce_1x1.""" )
for i in [3, 4, 5]:
if f'''layer_{i}.0.''' in k:
_A : Optional[int] = k_new.replace(f'''layer_{i}.0.''',f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
if f'''layer_{i}.1.local_rep.0.''' in k:
_A : Union[str, Any] = k_new.replace(f'''layer_{i}.1.local_rep.0.''',f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
if f'''layer_{i}.1.local_rep.1.''' in k:
_A : str = k_new.replace(f'''layer_{i}.1.local_rep.1.''',f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
for i in [3, 4, 5]:
if i == 3:
_A : Optional[int] = [0, 1]
elif i == 4:
_A : Union[str, Any] = [0, 1, 2, 3]
elif i == 5:
_A : Optional[Any] = [0, 1, 2]
for j in j_in:
if f'''layer_{i}.1.global_rep.{j}.''' in k:
_A : Union[str, Any] = k_new.replace(
f'''layer_{i}.1.global_rep.{j}.''',f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
_A : List[str] = k_new.replace(
f'''layer_{i}.1.global_rep.{j+1}.''',f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
if f'''layer_{i}.1.conv_proj.''' in k:
_A : Optional[Any] = k_new.replace(f'''layer_{i}.1.conv_proj.''',f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
if "pre_norm_attn.0." in k:
_A : Optional[Any] = k_new.replace("""pre_norm_attn.0.""","""layernorm_before.""" )
if "pre_norm_attn.1." in k:
_A : str = k_new.replace("""pre_norm_attn.1.""","""attention.""" )
if "pre_norm_ffn.0." in k:
_A : Optional[Any] = k_new.replace("""pre_norm_ffn.0.""","""layernorm_after.""" )
if "pre_norm_ffn.1." in k:
_A : Dict = k_new.replace("""pre_norm_ffn.1.""","""ffn.conv1.""" )
if "pre_norm_ffn.3." in k:
_A : List[str] = k_new.replace("""pre_norm_ffn.3.""","""ffn.conv2.""" )
if "classifier.1." in k:
_A : List[str] = k_new.replace("""classifier.1.""","""classifier.""" )
if "seg_head." in k:
_A : List[Any] = k_new.replace("""seg_head.""","""segmentation_head.""" )
if ".aspp_layer." in k:
_A : List[Any] = k_new.replace(""".aspp_layer.""",""".""" )
if ".aspp_pool." in k:
_A : Optional[Any] = k_new.replace(""".aspp_pool.""",""".""" )
rename_keys.append((k, k_new) )
return rename_keys
def lowerCAmelCase_ ( snake_case_ ):
_A : Tuple = []
for k in state_dict.keys():
if k.startswith("""seg_head.aux_head.""" ):
keys_to_ignore.append(snake_case_ )
for k in keys_to_ignore:
state_dict.pop(snake_case_,snake_case_ )
def lowerCAmelCase_ ( ):
_A : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg"""
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
_A : List[Any] = Image.open(requests.get(snake_case_,stream=snake_case_ ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ):
_A : List[Any] = get_mobilevitva_config(snake_case_,snake_case_ )
# load original state_dict
_A : Tuple = torch.load(snake_case_,map_location="""cpu""" )
# load huggingface model
if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
_A : Optional[Any] = MobileViTVaForSemanticSegmentation(snake_case_ ).eval()
_A : str = False
else:
_A : int = MobileViTVaForImageClassification(snake_case_ ).eval()
_A : List[Any] = False
    # remove and rename some keys in the loaded original state dict
_A : List[Any] = checkpoint
remove_unused_keys(snake_case_ )
_A : Optional[Any] = create_rename_keys(snake_case_,base_model=snake_case_ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(snake_case_,snake_case_,snake_case_ )
# load modified state_dict
model.load_state_dict(snake_case_ )
# Check outputs on an image, prepared by MobileViTImageProcessor
_A : str = MobileViTImageProcessor(crop_size=config.image_size,size=config.image_size + 32 )
_A : List[Any] = image_processor(images=prepare_img(),return_tensors="""pt""" )
_A : Optional[Any] = model(**snake_case_ )
# verify classification model
if task_name.startswith("""imagenet""" ):
_A : List[Any] = outputs.logits
_A : Optional[int] = logits.argmax(-1 ).item()
print("""Predicted class:""",model.config.idalabel[predicted_class_idx] )
if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
# expected_logits for base variant
_A : int = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] )
assert torch.allclose(logits[0, :3],snake_case_,atol=1e-4 )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case_ )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(snake_case_ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
_snake_case = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
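# Illustrative invocation (editorial note: the script name and file paths below are
# placeholders, not files shipped with the repository; only the flags mirror the
# argparse definition above):
#
#   python convert_mobilevitv2_checkpoint.py \
#       --task imagenet1k_256 \
#       --orig_checkpoint_path mobilevitv2-1.0.pt \
#       --orig_config_path mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-imagenet1k-256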
| 26 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = ShapEImgaImgPipeline
_a = ["image"]
_a = ["image"]
_a = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
_a = False
@property
def a__ ( self ) -> Optional[int]:
return 32
@property
def a__ ( self ) -> Tuple:
return 32
@property
def a__ ( self ) -> Optional[Any]:
return self.time_input_dim * 4
@property
def a__ ( self ) -> Optional[int]:
return 8
@property
def a__ ( self ) -> List[Any]:
torch.manual_seed(0 )
_A : Union[str, Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
_A : List[Any] = CLIPVisionModel(_a )
return model
@property
def a__ ( self ) -> Dict:
_A : int = CLIPImageProcessor(
crop_size=224 , do_center_crop=_a , do_normalize=_a , do_resize=_a , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
@property
def a__ ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
_A : str = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""embedding_proj_norm_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
_A : Optional[int] = PriorTransformer(**_a )
return model
@property
def a__ ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
_A : List[str] = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
_A : int = ShapERenderer(**_a )
return model
def a__ ( self ) -> int:
_A : Optional[Any] = self.dummy_prior
_A : Tuple = self.dummy_image_encoder
_A : str = self.dummy_image_processor
_A : Any = self.dummy_renderer
_A : str = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=_a , clip_sample=_a , clip_sample_range=1.0 , )
_A : Union[str, Any] = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""image_processor""": image_processor,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def a__ ( self , _a , _a=0 ) -> int:
_A : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(_a ) ).to(_a )
if str(_a ).startswith("""mps""" ):
_A : List[Any] = torch.manual_seed(_a )
else:
_A : Tuple = torch.Generator(device=_a ).manual_seed(_a )
_A : List[Any] = {
"""image""": input_image,
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def a__ ( self ) -> Tuple:
_A : List[Any] = """cpu"""
_A : List[str] = self.get_dummy_components()
_A : str = self.pipeline_class(**_a )
_A : Optional[int] = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_A : Tuple = pipe(**self.get_dummy_inputs(_a ) )
_A : Tuple = output.images[0]
_A : Dict = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_A : Dict = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a__ ( self ) -> Tuple:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def a__ ( self ) -> Tuple:
_A : List[str] = torch_device == """cpu"""
_A : Union[str, Any] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_a , relax_max_difference=_a , )
def a__ ( self ) -> Tuple:
_A : int = self.get_dummy_components()
_A : Optional[int] = self.pipeline_class(**_a )
_A : int = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_A : int = 1
_A : List[str] = 2
_A : str = self.get_dummy_inputs(_a )
for key in inputs.keys():
if key in self.batch_params:
_A : str = batch_size * [inputs[key]]
_A : Union[str, Any] = pipe(**_a , num_images_per_prompt=_a )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ) -> Dict:
_A : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" )
_A : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_img2img_out.npy""" )
_A : List[Any] = ShapEImgaImgPipeline.from_pretrained("""openai/shap-e-img2img""" )
_A : Union[str, Any] = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_A : Tuple = torch.Generator(device=_a ).manual_seed(0 )
_A : Optional[Any] = pipe(
_a , generator=_a , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_a , _a )
| 26 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowercase ( UpperCamelCase__ ):
_a = (DPMSolverSDEScheduler,)
_a = 1_0
def a__ ( self , **_a ) -> Optional[Any]:
_A : str = {
"""num_train_timesteps""": 1100,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""noise_sampler_seed""": 0,
}
config.update(**_a )
return config
def a__ ( self ) -> Tuple:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_a )
def a__ ( self ) -> Optional[int]:
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_a , beta_end=_a )
def a__ ( self ) -> Any:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_a )
def a__ ( self ) -> Optional[int]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_a )
def a__ ( self ) -> Optional[int]:
_A : Any = self.scheduler_classes[0]
_A : List[str] = self.get_scheduler_config()
_A : Optional[Any] = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
_A : Dict = self.dummy_model()
_A : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
_A : Dict = sample.to(_a )
for i, t in enumerate(scheduler.timesteps ):
_A : Optional[int] = scheduler.scale_model_input(_a , _a )
_A : str = model(_a , _a )
_A : List[Any] = scheduler.step(_a , _a , _a )
_A : Optional[int] = output.prev_sample
_A : Dict = torch.sum(torch.abs(_a ) )
_A : Dict = torch.mean(torch.abs(_a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47821044921875 ) < 1e-2
assert abs(result_mean.item() - 0.2178705964565277 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59352111816406 ) < 1e-2
assert abs(result_mean.item() - 0.22342906892299652 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3
def a__ ( self ) -> Optional[Any]:
_A : Dict = self.scheduler_classes[0]
_A : Optional[int] = self.get_scheduler_config(prediction_type="""v_prediction""" )
_A : Optional[Any] = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
_A : Tuple = self.dummy_model()
_A : int = self.dummy_sample_deter * scheduler.init_noise_sigma
_A : Tuple = sample.to(_a )
for i, t in enumerate(scheduler.timesteps ):
_A : int = scheduler.scale_model_input(_a , _a )
_A : Tuple = model(_a , _a )
_A : Dict = scheduler.step(_a , _a , _a )
_A : Optional[int] = output.prev_sample
_A : Optional[Any] = torch.sum(torch.abs(_a ) )
_A : List[Any] = torch.mean(torch.abs(_a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77149200439453 ) < 1e-2
assert abs(result_mean.item() - 0.16226289014816284 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1663360595703 ) < 1e-2
assert abs(result_mean.item() - 0.16688326001167297 ) < 1e-3
else:
assert abs(result_sum.item() - 119.8487548828125 ) < 1e-2
assert abs(result_mean.item() - 0.1560530662536621 ) < 1e-3
def a__ ( self ) -> List[str]:
_A : Union[str, Any] = self.scheduler_classes[0]
_A : List[Any] = self.get_scheduler_config()
_A : List[str] = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps , device=_a )
_A : Union[str, Any] = self.dummy_model()
_A : Optional[Any] = self.dummy_sample_deter.to(_a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_A : int = scheduler.scale_model_input(_a , _a )
_A : List[Any] = model(_a , _a )
_A : Dict = scheduler.step(_a , _a , _a )
_A : Dict = output.prev_sample
_A : str = torch.sum(torch.abs(_a ) )
_A : str = torch.mean(torch.abs(_a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46957397460938 ) < 1e-2
assert abs(result_mean.item() - 0.21805934607982635 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59353637695312 ) < 1e-2
assert abs(result_mean.item() - 0.22342908382415771 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3
def a__ ( self ) -> Union[str, Any]:
_A : List[Any] = self.scheduler_classes[0]
_A : Optional[Any] = self.get_scheduler_config()
_A : int = scheduler_class(**_a , use_karras_sigmas=_a )
scheduler.set_timesteps(self.num_inference_steps , device=_a )
_A : Optional[Any] = self.dummy_model()
_A : Dict = self.dummy_sample_deter.to(_a ) * scheduler.init_noise_sigma
_A : str = sample.to(_a )
for t in scheduler.timesteps:
_A : Optional[int] = scheduler.scale_model_input(_a , _a )
_A : List[Any] = model(_a , _a )
_A : Dict = scheduler.step(_a , _a , _a )
_A : List[str] = output.prev_sample
_A : str = torch.sum(torch.abs(_a ) )
_A : List[str] = torch.mean(torch.abs(_a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66974135742188 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63653564453125 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
else:
assert abs(result_sum.item() - 170.3135223388672 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
| 26 | 1 |
from __future__ import annotations
import bisect
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ = 0,snake_case_ = -1 ):
if hi < 0:
_A : List[Any] = len(snake_case_ )
while lo < hi:
_A : str = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
_A : str = mid + 1
else:
_A : Tuple = mid
return lo
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ = 0,snake_case_ = -1 ):
if hi < 0:
_A : List[str] = len(snake_case_ )
while lo < hi:
_A : Tuple = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
_A : Union[str, Any] = mid + 1
else:
_A : Any = mid
return lo
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ = 0,snake_case_ = -1 ):
sorted_collection.insert(bisect_left(snake_case_,snake_case_,snake_case_,snake_case_ ),snake_case_ )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ = 0,snake_case_ = -1 ):
sorted_collection.insert(bisect_right(snake_case_,snake_case_,snake_case_,snake_case_ ),snake_case_ )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Optional[int] = 0
_A : Optional[Any] = len(snake_case_ ) - 1
while left <= right:
_A : List[str] = left + (right - left) // 2
_A : List[Any] = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
_A : List[Any] = midpoint - 1
else:
_A : List[str] = midpoint + 1
return None
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : int = bisect.bisect_left(snake_case_,snake_case_ )
if index != len(snake_case_ ) and sorted_collection[index] == item:
return index
return None
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ):
if right < left:
return None
_A : List[str] = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(snake_case_,snake_case_,snake_case_,midpoint - 1 )
else:
return binary_search_by_recursion(snake_case_,snake_case_,midpoint + 1,snake_case_ )
if __name__ == "__main__":
_snake_case = input("Enter numbers separated by comma:\n").strip()
_snake_case = sorted(int(item) for item in user_input.split(","))
_snake_case = int(input("Enter a single number to be found in the list:\n"))
_snake_case = binary_search(collection, target)
if result is None:
print(f"""{target} was not found in {collection}.""")
else:
print(f"""{target} was found at position {result} in {collection}.""")
| 26 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = 1
@register_to_config
def __init__( self , _a=2000 , _a=0.1 , _a=20 , _a=1e-3 ) -> List[Any]:
_A : Dict = None
_A : List[Any] = None
_A : Dict = None
def a__ ( self , _a , _a = None ) -> Union[str, Any]:
_A : Union[str, Any] = torch.linspace(1 , self.config.sampling_eps , _a , device=_a )
def a__ ( self , _a , _a , _a , _a=None ) -> Dict:
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
_A : Any = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
_A : List[Any] = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
_A : List[str] = std.flatten()
while len(std.shape ) < len(score.shape ):
_A : List[Any] = std.unsqueeze(-1 )
_A : int = -score / std
# compute
_A : Tuple = -1.0 / len(self.timesteps )
_A : str = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
_A : List[str] = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
_A : Union[str, Any] = beta_t.unsqueeze(-1 )
_A : Tuple = -0.5 * beta_t * x
_A : Tuple = torch.sqrt(_a )
_A : Dict = drift - diffusion**2 * score
_A : Dict = x + drift * dt
# add noise
_A : Any = randn_tensor(x.shape , layout=x.layout , generator=_a , device=x.device , dtype=x.dtype )
_A : str = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self ) -> Optional[Any]:
return self.config.num_train_timesteps
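# Editorial note: the step method above (called `step_pred` in the upstream
# scheduler) is an Euler-Maruyama discretization of the reverse-time VP-SDE from
# score-based generative modeling,
#   dx = [-1/2 * beta(t) * x - beta(t) * score(x, t)] dt + sqrt(beta(t)) dw_bar,
# which matches drift = -0.5 * beta_t * x - diffusion**2 * score with
# diffusion = sqrt(beta_t) computed in the code.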
| 26 | 1 |
from __future__ import annotations
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
if days_between_payments <= 0:
raise ValueError("""days_between_payments must be > 0""" )
if daily_interest_rate < 0:
raise ValueError("""daily_interest_rate must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return principal * daily_interest_rate * days_between_payments
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,):
if number_of_compounding_periods <= 0:
raise ValueError("""number_of_compounding_periods must be > 0""" )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError("""nominal_annual_interest_rate_percentage must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,):
if number_of_years <= 0:
raise ValueError("""number_of_years must be > 0""" )
if nominal_annual_percentage_rate < 0:
raise ValueError("""nominal_annual_percentage_rate must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return compound_interest(
snake_case_,nominal_annual_percentage_rate / 365,number_of_years * 365 )
if __name__ == "__main__":
import doctest
doctest.testmod()
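# Worked example (plain arithmetic, not extra test code): with a principal of
# 10_000, a daily rate of 0.06 and 3 days between payments the simple-interest
# formula above gives 10_000 * 0.06 * 3 = 1_800.0, and the compound-interest
# formula with the same principal, a 5% rate per period and 2 periods gives
# 10_000 * ((1 + 0.05) ** 2 - 1) = 1_025.0.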
| 26 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
},
"tokenizer_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
},
}
_snake_case = {
"google/fnet-base": 512,
"google/fnet-large": 512,
}
_snake_case = "▁"
class lowercase ( UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = ["input_ids", "token_type_ids"]
_a = FNetTokenizer
def __init__( self , _a=None , _a=None , _a=False , _a=True , _a=True , _a="<unk>" , _a="[SEP]" , _a="<pad>" , _a="[CLS]" , _a="[MASK]" , **_a , ) -> Optional[int]:
        # The mask token behaves like a normal word, i.e. it includes the space before it
        # and is included in the raw text; there should be a match in a non-normalized sentence.
_A : int = (
AddedToken(_a , lstrip=_a , rstrip=_a , normalized=_a )
if isinstance(_a , _a )
else mask_token
)
super().__init__(
_a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , **_a , )
_A : Optional[int] = do_lower_case
_A : List[Any] = remove_space
_A : str = keep_accents
_A : int = vocab_file
_A : int = False if not self.vocab_file else True
def a__ ( self , _a , _a = None ) -> List[int]:
_A : str = [self.sep_token_id]
_A : Dict = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def a__ ( self , _a , _a = None ) -> List[int]:
_A : Any = [self.sep_token_id]
_A : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a__ ( self , _a , _a = None ) -> Tuple[str]:
if not os.path.isdir(_a ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A : List[str] = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
return (out_vocab_file,)
| 26 | 1 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = BarthezTokenizer
_a = BarthezTokenizerFast
_a = True
_a = True
def a__ ( self ) -> Tuple:
super().setUp()
_A : List[str] = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=_a )
_A : List[str] = tokenizer
def a__ ( self ) -> Any:
_A : Tuple = """<pad>"""
_A : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def a__ ( self ) -> str:
_A : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(_a ) , 10_1122 )
def a__ ( self ) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
def a__ ( self ) -> Tuple:
_A : List[str] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_A : Optional[int] = [0, 57, 3018, 7_0307, 91, 2]
_A : List[str] = self.tokenizer(
_a , max_length=len(_a ) , padding=_a , truncation=_a , return_tensors="""pt""" )
self.assertIsInstance(_a , _a )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
_A : Any = batch.input_ids.tolist()[0]
self.assertListEqual(_a , _a )
def a__ ( self ) -> List[str]:
if not self.test_rust_tokenizer:
return
_A : Union[str, Any] = self.get_tokenizer()
_A : Tuple = self.get_rust_tokenizer()
_A : Optional[int] = """I was born in 92000, and this is falsé."""
_A : Any = tokenizer.tokenize(_a )
_A : Union[str, Any] = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
_A : Any = tokenizer.encode(_a , add_special_tokens=_a )
_A : List[str] = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
_A : Optional[Any] = self.get_rust_tokenizer()
_A : Any = tokenizer.encode(_a )
_A : List[str] = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
@slow
def a__ ( self ) -> int:
# fmt: off
_A : int = {"""input_ids""": [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
_A : Any = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=_a , )
| 26 |
from math import asin, atan, cos, radians, sin, sqrt, tan
_snake_case = 6_3_7_8_1_3_7.0
_snake_case = 6_3_5_6_7_5_2.3_1_4_2_4_5
_snake_case = 6378137
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ):
_A : Any = (AXIS_A - AXIS_B) / AXIS_A
_A : Optional[int] = atan((1 - flattening) * tan(radians(snake_case_ ) ) )
_A : List[str] = atan((1 - flattening) * tan(radians(snake_case_ ) ) )
_A : Optional[Any] = radians(snake_case_ )
_A : str = radians(snake_case_ )
# Equation
_A : Dict = sin((phi_a - phi_a) / 2 )
_A : List[str] = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
_A : Optional[int] = sqrt(sin_sq_phi + (cos(snake_case_ ) * cos(snake_case_ ) * sin_sq_lambda) )
return 2 * RADIUS * asin(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
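# Sanity checks for the formula above: identical coordinates give 0.0 (both
# half-angle sines vanish), and San Francisco (37.77, -122.42) to New York
# (40.71, -74.01) comes out at roughly 4.1e6 meters, consistent with the commonly
# quoted ~4,100 km great-circle distance (figures approximate, quoted only as an
# order-of-magnitude check).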
| 26 | 1 |
from __future__ import annotations
from collections.abc import Callable
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ = 100,):
_A : List[str] = x_start
_A : List[str] = fnc(snake_case_ )
_A : Tuple = 0.0
for _ in range(snake_case_ ):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
_A : List[Any] = (x_end - x_start) / steps + xa
_A : Any = fnc(snake_case_ )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
_A : Optional[Any] = xa
_A : Any = fxa
return area
if __name__ == "__main__":
def lowerCAmelCase_ ( snake_case_ ):
return x**3 + x**2
print("f(x) = x^3 + x^2")
print("The area between the curve, x = -5, x = 5 and the x axis is:")
_snake_case = 10
while i <= 100000:
print(f"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
i *= 10
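# For reference, the exact value of the integral being estimated above is
#   integral_{-5}^{5} (x^3 + x^2) dx = 0 + 2 * 5**3 / 3 = 250 / 3 ≈ 83.33
# (the cubic term vanishes on a symmetric interval), so the printed estimates
# should converge toward 83.33 as the step count grows.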
| 26 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",UpperCamelCase__,)
class lowercase ( UpperCamelCase__ ):
_a = RobertaConfig
_a = "roberta"
def __init__( self , _a ) -> Optional[int]:
super().__init__(_a )
_A : Union[str, Any] = RobertaEmbeddings(_a )
self.init_weights()
@add_start_docstrings(
"RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ",UpperCamelCase__,)
class lowercase ( UpperCamelCase__ ):
_a = RobertaConfig
_a = "roberta"
def __init__( self , _a ) -> str:
super().__init__(_a )
_A : Any = config.num_labels
_A : Dict = config.num_hidden_layers
_A : List[str] = DeeRobertaModel(_a )
_A : int = nn.Dropout(config.hidden_dropout_prob )
_A : int = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(_a )
def a__ ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=-1 , _a=False , ) -> Any:
_A : Optional[int] = self.num_layers
try:
_A : List[str] = self.roberta(
_a , attention_mask=_a , token_type_ids=_a , position_ids=_a , head_mask=_a , inputs_embeds=_a , )
_A : List[str] = outputs[1]
_A : List[str] = self.dropout(_a )
_A : Optional[Any] = self.classifier(_a )
_A : List[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_A : List[Any] = e.message
_A : Optional[int] = e.exit_layer
_A : Optional[int] = outputs[0]
if not self.training:
_A : int = entropy(_a )
_A : int = []
_A : int = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_A : Union[str, Any] = MSELoss()
_A : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_A : List[Any] = CrossEntropyLoss()
_A : Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_A : Optional[Any] = []
for highway_exit in outputs[-1]:
_A : Tuple = highway_exit[0]
if not self.training:
highway_logits_all.append(_a )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_A : List[str] = MSELoss()
_A : Optional[int] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_A : List[Any] = CrossEntropyLoss()
_A : Tuple = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_a )
if train_highway:
_A : Dict = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_A : int = (loss,) + outputs
if not self.training:
_A : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_A : Union[str, Any] = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 26 | 1 |
def lowerCAmelCase_ ( snake_case_ ):
if not isinstance(snake_case_,snake_case_ ):
raise TypeError("""only integers accepted as input""" )
else:
_A : int = str(abs(snake_case_ ) )
_A : Dict = [list(snake_case_ ) for char in range(len(snake_case_ ) )]
for index in range(len(snake_case_ ) ):
num_transpositions[index].pop(snake_case_ )
return max(
int("""""".join(list(snake_case_ ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("doctest").testmod()
| 26 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class lowercase ( UpperCamelCase__ ):
_a = "xmod"
def __init__( self , _a=3_0522 , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1e-12 , _a=1 , _a=0 , _a=2 , _a="absolute" , _a=True , _a=None , _a=False , _a=2 , _a=False , _a=True , _a=True , _a=("en_XX",) , _a=None , **_a , ) -> str:
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
_A : Tuple = vocab_size
_A : Union[str, Any] = hidden_size
_A : Dict = num_hidden_layers
_A : Dict = num_attention_heads
_A : List[Any] = hidden_act
_A : Optional[Any] = intermediate_size
_A : Any = hidden_dropout_prob
_A : str = attention_probs_dropout_prob
_A : Dict = max_position_embeddings
_A : Any = type_vocab_size
_A : List[Any] = initializer_range
_A : int = layer_norm_eps
_A : int = position_embedding_type
_A : Any = use_cache
_A : int = classifier_dropout
_A : int = pre_norm
_A : Optional[Any] = adapter_reduction_factor
_A : List[Any] = adapter_layer_norm
_A : Optional[int] = adapter_reuse_layer_norm
_A : Any = ln_before_adapter
_A : Union[str, Any] = list(_a )
_A : List[Any] = default_language
class lowercase ( UpperCamelCase__ ):
@property
def a__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_A : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_A : Dict = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 26 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_snake_case = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_enforce_args(snake_case_,snake_case_ )
if n == 0:
return 0
_A : Tuple = float("""-inf""" )
for i in range(1,n + 1 ):
_A : str = max(
snake_case_,prices[i - 1] + naive_cut_rod_recursive(n - i,snake_case_ ) )
return max_revue
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_enforce_args(snake_case_,snake_case_ )
_A : Dict = [float("""-inf""" ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(snake_case_,snake_case_,snake_case_ )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
_A : List[str] = float("""-inf""" )
for i in range(1,n + 1 ):
_A : Optional[Any] = max(
snake_case_,prices[i - 1] + _top_down_cut_rod_recursive(n - i,snake_case_,snake_case_ ),)
_A : Tuple = max_revenue
return max_rev[n]
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_enforce_args(snake_case_,snake_case_ )
    # length(max_rev) = n + 1, to accommodate the revenue obtainable from a rod of
# length 0.
_A : List[Any] = [float("""-inf""" ) for _ in range(n + 1 )]
_A : Any = 0
for i in range(1,n + 1 ):
_A : Optional[Any] = max_rev[i]
for j in range(1,i + 1 ):
_A : int = max(snake_case_,prices[j - 1] + max_rev[i - j] )
_A : int = max_revenue_i
return max_rev[n]
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
if n < 0:
_A : Optional[Any] = f'''n must be greater than or equal to 0. Got n = {n}'''
raise ValueError(snake_case_ )
if n > len(snake_case_ ):
_A : Any = (
"""Each integral piece of rod must have a corresponding price. """
f'''Got n = {n} but length of prices = {len(snake_case_ )}'''
)
raise ValueError(snake_case_ )
def lowerCAmelCase_ ( ):
_A : Tuple = [6, 10, 12, 15, 20, 23]
_A : List[Any] = len(snake_case_ )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
_A : Any = 36
_A : List[Any] = top_down_cut_rod(snake_case_,snake_case_ )
_A : List[Any] = bottom_up_cut_rod(snake_case_,snake_case_ )
_A : Dict = naive_cut_rod_recursive(snake_case_,snake_case_ )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
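# A second, hand-checkable instance (the classic CLRS rod-cutting example, not
# exercised by the demo above): with prices [1, 5, 8, 9, 10, 17, 17, 20] the
# best revenue for a rod of length 8 is 22, obtained by cutting it into pieces
# of length 2 and 6 (5 + 17).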
| 26 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_snake_case = {
"configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
import requests
from bs4 import BeautifulSoup
def lowerCAmelCase_ ( snake_case_ = "AAPL" ):
_A : str = f'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
_A : List[Any] = BeautifulSoup(requests.get(snake_case_ ).text,"""html.parser""" )
_A : Union[str, Any] = """My(6px) Pos(r) smartphone_Mt(6px)"""
return soup.find("""div""",class_=class_ ).find("""span""" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 26 | 1 |
import tensorflow as tf
from ...tf_utils import shape_list
class lowercase ( tf.keras.layers.Layer ):
def __init__( self , _a , _a , _a , _a , _a=1 , _a=False , **_a ) -> Optional[int]:
super().__init__(**_a )
_A : List[Any] = vocab_size
_A : Union[str, Any] = d_embed
_A : str = d_proj
_A : List[str] = cutoffs + [vocab_size]
_A : Optional[int] = [0] + self.cutoffs
_A : Optional[Any] = div_val
_A : List[str] = self.cutoffs[0]
_A : List[str] = len(self.cutoffs ) - 1
_A : Tuple = self.shortlist_size + self.n_clusters
_A : List[str] = keep_order
_A : Tuple = []
_A : int = []
def a__ ( self , _a ) -> int:
if self.n_clusters > 0:
_A : int = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer="""zeros""" , trainable=_a , name="""cluster_weight""" )
_A : List[str] = self.add_weight(
shape=(self.n_clusters,) , initializer="""zeros""" , trainable=_a , name="""cluster_bias""" )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
_A : int = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer="""zeros""" , trainable=_a , name=F'''out_projs_._{i}''' , )
self.out_projs.append(_a )
else:
self.out_projs.append(_a )
_A : str = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer="""zeros""" , trainable=_a , name=F'''out_layers_._{i}_._weight''' , )
_A : Union[str, Any] = self.add_weight(
shape=(self.vocab_size,) , initializer="""zeros""" , trainable=_a , name=F'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
_A , _A : List[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_A : Optional[int] = self.d_embed // (self.div_val**i)
_A : Dict = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer="""zeros""" , trainable=_a , name=F'''out_projs_._{i}''' )
self.out_projs.append(_a )
_A : Optional[int] = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer="""zeros""" , trainable=_a , name=F'''out_layers_._{i}_._weight''' , )
_A : int = self.add_weight(
shape=(r_idx - l_idx,) , initializer="""zeros""" , trainable=_a , name=F'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
super().build(_a )
@staticmethod
def a__ ( _a , _a , _a , _a=None ) -> Dict:
_A : Tuple = x
if proj is not None:
_A : Any = tf.einsum("""ibd,ed->ibe""" , _a , _a )
return tf.einsum("""ibd,nd->ibn""" , _a , _a ) + b
@staticmethod
def a__ ( _a , _a ) -> Optional[Any]:
_A : Any = shape_list(_a )
_A : List[str] = tf.range(lp_size[0] , dtype=target.dtype )
_A : Optional[Any] = tf.stack([r, target] , 1 )
return tf.gather_nd(_a , _a )
def a__ ( self , _a , _a , _a=True , _a=False ) -> Optional[Any]:
_A : Union[str, Any] = 0
if self.n_clusters == 0:
_A : List[Any] = self._logit(_a , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
_A : int = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=_a , logits=_a )
_A : Union[str, Any] = tf.nn.log_softmax(_a , axis=-1 )
else:
_A : Optional[Any] = shape_list(_a )
_A : Optional[Any] = []
_A : Optional[Any] = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
_A , _A : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
_A : int = (target >= l_idx) & (target < r_idx)
_A : str = tf.where(_a )
_A : Dict = tf.boolean_mask(_a , _a ) - l_idx
if self.div_val == 1:
_A : str = self.out_layers[0][0][l_idx:r_idx]
_A : List[str] = self.out_layers[0][1][l_idx:r_idx]
else:
_A : List[Any] = self.out_layers[i][0]
_A : int = self.out_layers[i][1]
if i == 0:
_A : int = tf.concat([cur_W, self.cluster_weight] , 0 )
_A : int = tf.concat([cur_b, self.cluster_bias] , 0 )
_A : Dict = self._logit(_a , _a , _a , self.out_projs[0] )
_A : Optional[Any] = tf.nn.log_softmax(_a )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
_A : Tuple = tf.boolean_mask(_a , _a )
_A : Union[str, Any] = self._gather_logprob(_a , _a )
else:
_A : Dict = self._logit(_a , _a , _a , self.out_projs[i] )
_A : Union[str, Any] = tf.nn.log_softmax(_a )
_A : Union[str, Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster
_A : Dict = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(_a )
if target is not None:
_A : Optional[int] = tf.boolean_mask(_a , _a )
_A : Optional[Any] = tf.boolean_mask(_a , _a )
_A : Tuple = self._gather_logprob(_a , _a )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(_a , -cur_logprob , shape_list(_a ) )
_A : Optional[Any] = tf.concat(_a , axis=-1 )
if target is not None:
if return_mean:
_A : Tuple = tf.reduce_mean(_a )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(_a )
# Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
self.add_metric(_a , name=self.name , aggregation="""mean""" if return_mean else """""" )
return out
| 26 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class lowercase ( unittest.TestCase ):
_a = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_a = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def a__ ( self , _a , _a , _a ) -> int:
_A : str = TextaTextGenerationPipeline(model=_a , tokenizer=_a )
return generator, ["Something to write", "Something else"]
def a__ ( self , _a , _a ) -> Dict:
_A : Any = generator("""Something there""" )
self.assertEqual(_a , [{"""generated_text""": ANY(_a )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
_A : List[Any] = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_a )
self.assertEqual(
_a , [
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] , )
_A : Optional[int] = generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=_a )
self.assertEqual(
_a , [
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] , )
with self.assertRaises(_a ):
generator(4 )
@require_torch
def a__ ( self ) -> List[str]:
_A : Any = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" )
# do_sample=False necessary for reproducibility
_A : Dict = generator("""Something there""" , do_sample=_a )
self.assertEqual(_a , [{"""generated_text""": """"""}] )
_A : Any = 3
_A : Any = generator(
"""Something there""" , num_return_sequences=_a , num_beams=_a , )
_A : Optional[int] = [
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """"""},
]
self.assertEqual(_a , _a )
_A : Dict = generator("""This is a test""" , do_sample=_a , num_return_sequences=2 , return_tensors=_a )
self.assertEqual(
_a , [
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
] , )
_A : Dict = generator.model.config.eos_token_id
_A : List[str] = """<pad>"""
_A : Dict = generator(
["""This is a test""", """This is a second test"""] , do_sample=_a , num_return_sequences=2 , batch_size=2 , return_tensors=_a , )
self.assertEqual(
_a , [
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
] , )
@require_tf
def a__ ( self ) -> int:
_A : Optional[Any] = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" )
# do_sample=False necessary for reproducibility
_A : str = generator("""Something there""" , do_sample=_a )
self.assertEqual(_a , [{"""generated_text""": """"""}] )
| 26 | 1 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
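# Decorator: when Accelerate >= 0.17.0 is installed, fire the module's offload hook (_hf_hook.pre_forward) before the wrapped method runs.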
def apply_forward_hook(method):
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)
return wrapper
| 26 |
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
print(f'''euclidean_gcd(3, 5) = {euclidean_gcd(3,5 )}''' )
print(f'''euclidean_gcd(5, 3) = {euclidean_gcd(5,3 )}''' )
print(f'''euclidean_gcd(1, 3) = {euclidean_gcd(1,3 )}''' )
print(f'''euclidean_gcd(3, 6) = {euclidean_gcd(3,6 )}''' )
print(f'''euclidean_gcd(6, 3) = {euclidean_gcd(6,3 )}''' )
print(f'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3,5 )}''' )
print(f'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5,3 )}''' )
print(f'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1,3 )}''' )
print(f'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3,6 )}''' )
print(f'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6,3 )}''' )
if __name__ == "__main__":
main()
| 26 | 1 |
def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
series.append(f'''1/{temp + 1}''' if series else """1""" )
return series
if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
print(harmonic_series(nth_term))
| 26 |
def is_power_of_two(number: int) -> bool:
if number < 0:
raise ValueError("""number must not be negative""" )
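    # A power of two has exactly one bit set, so clearing the lowest set bit with number & (number - 1) leaves zero.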
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 | 1 |
from __future__ import annotations
import typing
from collections import Counter
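# Brute-force count of integer right triangles: for every perimeter up to the limit, tally how many (base, perpendicular, hypotenuse) triples produce it.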
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def solution(max_perimeter: int = 1000) -> int:
    triplets = pythagorean_triple(max_perimeter)
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f"""Perimeter {solution()} has maximum solutions""")
| 26 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
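# Each [parlai_name, hf_name] pair above rewrites a ParlAI parameter-name fragment into its Hugging Face equivalent.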
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
return k
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(mapping)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 26 | 1 |
def gnome_sort(lst: list) -> list:
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
return lst
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
| 26 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a , _a , _a = None , _a = None , _a = False , **_a , ) -> int:
super().__init__(features=_a , cache_dir=_a , keep_in_memory=_a , **_a )
_A : Optional[int] = Sql(
cache_dir=_a , features=_a , sql=_a , con=_a , **_a , )
def a__ ( self ) -> Optional[Any]:
_A : Tuple = None
_A : int = None
_A : Tuple = None
_A : Union[str, Any] = None
self.builder.download_and_prepare(
download_config=_a , download_mode=_a , verification_mode=_a , base_path=_a , )
# Build dataset for splits
_A : int = self.builder.as_dataset(
split="""train""" , verification_mode=_a , in_memory=self.keep_in_memory )
return dataset
class lowercase :
def __init__( self , _a , _a , _a , _a = None , _a = None , **_a , ) -> Union[str, Any]:
if num_proc is not None and num_proc <= 0:
raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''' )
_A : Dict = dataset
_A : int = name
_A : Union[str, Any] = con
_A : str = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
_A : str = num_proc
_A : Optional[Any] = to_sql_kwargs
def a__ ( self ) -> int:
_A : Any = self.to_sql_kwargs.pop("""sql""" , _a )
_A : List[str] = self.to_sql_kwargs.pop("""con""" , _a )
_A : int = self.to_sql_kwargs.pop("""index""" , _a )
_A : List[str] = self._write(index=_a , **self.to_sql_kwargs )
return written
def a__ ( self , _a ) -> Optional[int]:
_A , _A , _A : List[str] = args
_A : int = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs
_A : str = query_table(
table=self.dataset.data , key=slice(_a , offset + self.batch_size ) , indices=self.dataset._indices , )
_A : Tuple = batch.to_pandas()
_A : Union[str, Any] = df.to_sql(self.name , self.con , index=_a , **_a )
return num_rows or len(_a )
def a__ ( self , _a , **_a ) -> int:
_A : Any = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
_A , _A : Tuple = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _a , _a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
written += num_rows
return written
| 26 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
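# Lazy import structure: maps each submodule to the public names it exports; optional backends are appended below only when available.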
_import_structure = {
"configuration_blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallOnnxConfig",
],
"tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
"TFBlenderbotSmallForConditionalGeneration",
"TFBlenderbotSmallModel",
"TFBlenderbotSmallPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
# See all FNet models at https://huggingface.co/models?filter=fnet
}
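# Configuration class holding the hyper-parameters used to instantiate an FNet model.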
class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(self, vocab_size=32000, hidden_size=768, num_hidden_layers=12, intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=4, initializer_range=0.02, layer_norm_eps=1e-12, use_tpu_fourier_optimizations=False, tpu_short_seq_length=512, pad_token_id=3, bos_token_id=1, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
| 26 | 1 |
import os
from collections.abc import Iterator
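# Builds a markdown index of the repository: collects Python files and notebooks, then prints nested headings and links.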
def lowerCAmelCase_ ( snake_case_ = "." ):
for dir_path, dir_names, filenames in os.walk(snake_case_ ):
_A : Any = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(snake_case_ )[1] in (".py", ".ipynb"):
yield os.path.join(snake_case_,snake_case_ ).lstrip("""./""" )
def lowerCAmelCase_ ( snake_case_ ):
return f'''{i * " "}*''' if i else "\n##"
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Any = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(snake_case_ ) or old_parts[i] != new_part) and new_part:
print(f'''{md_prefix(snake_case_ )} {new_part.replace("_"," " ).title()}''' )
return new_path
def lowerCAmelCase_ ( snake_case_ = "." ):
_A : Optional[int] = """"""
for filepath in sorted(good_file_paths(snake_case_ ) ):
_A , _A : int = os.path.split(snake_case_ )
if filepath != old_path:
_A : List[str] = print_path(snake_case_,snake_case_ )
_A : List[Any] = (filepath.count(os.sep ) + 1) if filepath else 0
_A : int = f'''{filepath}/{filename}'''.replace(""" ""","""%20""" )
_A : Dict = os.path.splitext(filename.replace("""_""",""" """ ).title() )[0]
print(f'''{md_prefix(snake_case_ )} [{filename}]({url})''' )
if __name__ == "__main__":
print_directory_md(".")
| 26 |
def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
series.append(f'''1/{temp + 1}''' if series else """1""" )
return series
if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
print(harmonic_series(nth_term))
| 26 | 1 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
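# Configuration class holding the hyper-parameters used to instantiate an EfficientNet model.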
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(self, num_channels=3, image_size=600, width_coefficient=2.0, depth_coefficient=3.1, depth_divisor=8, kernel_sizes=[3, 3, 5, 3, 5, 5, 3], in_channels=[32, 16, 24, 40, 80, 112, 192], out_channels=[16, 24, 40, 80, 112, 192, 320], depthwise_padding=[], strides=[1, 2, 2, 2, 1, 2, 1], num_block_repeats=[1, 2, 2, 3, 3, 4, 1], expand_ratios=[1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio=0.25, hidden_act="swish", hidden_dim=2560, pooling_type="mean", initializer_range=0.02, batch_norm_eps=0.001, batch_norm_momentum=0.99, dropout_rate=0.5, drop_connect_rate=0.2, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
    def atol_for_validation(self) -> float:
return 1e-5
| 26 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
_snake_case = logging.get_logger(__name__)
_snake_case = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
_snake_case = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def lowerCAmelCase_ ( snake_case_ ):
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
_A : List[str] = model_type_to_module_name(snake_case_ )
_A : List[Any] = importlib.import_module(f'''.{module_name}''',"""transformers.models""" )
try:
return getattr(snake_case_,snake_case_ )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(snake_case_,"""__name__""",snake_case_ ) == class_name:
return extractor
# We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
_A : List[Any] = importlib.import_module("""transformers""" )
if hasattr(snake_case_,snake_case_ ):
return getattr(snake_case_,snake_case_ )
return None
def lowerCAmelCase_ ( snake_case_,snake_case_ = None,snake_case_ = False,snake_case_ = False,snake_case_ = None,snake_case_ = None,snake_case_ = None,snake_case_ = False,**snake_case_,):
_A : Optional[int] = get_file_from_repo(
snake_case_,snake_case_,cache_dir=snake_case_,force_download=snake_case_,resume_download=snake_case_,proxies=snake_case_,use_auth_token=snake_case_,revision=snake_case_,local_files_only=snake_case_,)
if resolved_config_file is None:
logger.info(
"""Could not locate the feature extractor configuration file, will try to use the model config instead.""" )
return {}
with open(snake_case_,encoding="""utf-8""" ) as reader:
return json.load(snake_case_ )
class lowercase :
def __init__( self ) -> List[Any]:
raise EnvironmentError(
"""AutoFeatureExtractor is designed to be instantiated """
"""using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" )
@classmethod
@replace_list_option_in_docstrings(_a )
def a__ ( cls , _a , **_a ) -> Any:
_A : Tuple = kwargs.pop("""config""" , _a )
_A : Tuple = kwargs.pop("""trust_remote_code""" , _a )
_A : List[Any] = True
_A , _A : Tuple = FeatureExtractionMixin.get_feature_extractor_dict(_a , **_a )
_A : Tuple = config_dict.get("""feature_extractor_type""" , _a )
_A : int = None
if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ):
_A : Optional[int] = config_dict["""auto_map"""]["""AutoFeatureExtractor"""]
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(_a , _a ):
_A : int = AutoConfig.from_pretrained(_a , **_a )
# It could be in `config.feature_extractor_type``
_A : Optional[int] = getattr(_a , """feature_extractor_type""" , _a )
if hasattr(_a , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map:
_A : Tuple = config.auto_map["""AutoFeatureExtractor"""]
if feature_extractor_class is not None:
_A : Optional[Any] = feature_extractor_class_from_name(_a )
_A : List[Any] = feature_extractor_auto_map is not None
_A : Union[str, Any] = feature_extractor_class is not None or type(_a ) in FEATURE_EXTRACTOR_MAPPING
_A : Optional[int] = resolve_trust_remote_code(
_a , _a , _a , _a )
if has_remote_code and trust_remote_code:
_A : Dict = get_class_from_dynamic_module(
_a , _a , **_a )
_A : str = kwargs.pop("""code_revision""" , _a )
if os.path.isdir(_a ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(_a , **_a )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(_a , **_a )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(_a ) in FEATURE_EXTRACTOR_MAPPING:
_A : Dict = FEATURE_EXTRACTOR_MAPPING[type(_a )]
return feature_extractor_class.from_dict(_a , **_a )
raise ValueError(
F'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '''
F'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '''
F'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def a__ ( _a , _a ) -> Optional[int]:
FEATURE_EXTRACTOR_MAPPING.register(_a , _a )
| 26 | 1 |
from numpy import exp, pi, sqrt
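# Gaussian (normal) probability density function with mean mu and standard deviation sigma.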
def gaussian(x, mu: float = 0.0, sigma: float = 1.0):
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=False , _a=True , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , ) -> Dict:
_A : str = parent
_A : int = batch_size
_A : Optional[int] = num_channels
_A : List[Any] = image_size
_A : int = min_resolution
_A : Optional[int] = max_resolution
_A : Any = do_resize
_A : List[str] = size if size is not None else {"""height""": 18, """width""": 20}
_A : Optional[int] = do_thumbnail
_A : str = do_align_axis
_A : List[Any] = do_pad
_A : Optional[Any] = do_normalize
_A : Tuple = image_mean
_A : List[str] = image_std
def a__ ( self ) -> Optional[int]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = DonutImageProcessor if is_vision_available() else None
def a__ ( self ) -> Optional[int]:
_A : List[str] = DonutImageProcessingTester(self )
@property
def a__ ( self ) -> List[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> Optional[Any]:
_A : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , """do_resize""" ) )
self.assertTrue(hasattr(_a , """size""" ) )
self.assertTrue(hasattr(_a , """do_thumbnail""" ) )
self.assertTrue(hasattr(_a , """do_align_long_axis""" ) )
self.assertTrue(hasattr(_a , """do_pad""" ) )
self.assertTrue(hasattr(_a , """do_normalize""" ) )
self.assertTrue(hasattr(_a , """image_mean""" ) )
self.assertTrue(hasattr(_a , """image_std""" ) )
def a__ ( self ) -> List[Any]:
_A : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 20} )
_A : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
# Previous config had dimensions in (width, height) order
_A : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {"""height""": 84, """width""": 42} )
def a__ ( self ) -> Union[str, Any]:
pass
@is_flaky()
def a__ ( self ) -> Optional[int]:
# Initialize image_processing
_A : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : Any = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
@is_flaky()
def a__ ( self ) -> Dict:
# Initialize image_processing
_A : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
_A : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : List[str] = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
@is_flaky()
def a__ ( self ) -> Optional[int]:
# Initialize image_processing
_A : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_A : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : str = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 26 | 1 |
def is_power_of_two(number: int) -> bool:
if number < 0:
raise ValueError("""number must not be negative""" )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 |
from __future__ import annotations
import numpy as np
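# Rectified linear unit: element-wise maximum of 0 and the input.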
def relu(vector):
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 26 | 1 |