code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
string (lengths 86–54.5k) | int64 (0–371) | string (lengths 87–49.2k) | int64 (0–349) | int64 (0–1)
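Each row pairs a `code` string with a `style_context` string; `label` appears to be 1 exactly when the two `*_codestyle` indices match, i.e. when both samples share the same code style (this holds for every row shown below). A minimal sketch for reading rows with the `datasets` library, assuming a placeholder dataset path:

from datasets import load_dataset

ds = load_dataset("org/code-style-pairs")  # hypothetical path for illustration
row = ds["train"][0]
print(row["label"], row["code_codestyle"], row["style_context_codestyle"])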
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path , pytorch_dump_folder_path ):
"""Convert the original Bort checkpoint (GluonNLP/MXNet) into a Transformers PyTorch checkpoint."""
bort_4_8_768_1024_hparams = {
'''attention_cell''': '''multi_head''',
'''num_layers''': 4,
'''units''': 1_024,
'''hidden_size''': 768,
'''max_length''': 512,
'''num_heads''': 8,
'''scaled''': True,
'''dropout''': 0.1,
'''use_residual''': True,
'''embed_size''': 1_024,
'''embed_dropout''': 0.1,
'''word_embed''': None,
'''layer_norm_eps''': 1E-5,
'''token_type_vocab_size''': 2,
}
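# Bort-4-8-768-1024: 4 encoder layers, 8 heads, model width (units/embed_size) of
# 1024 and an FFN hidden_size of 768; these values have to match the checkpoint.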
predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
encoder = BERTEncoder(
attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=False , output_all_encodings=False , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , None ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
vocab_name = '''openwebtext_ccnews_stories_books_cased'''
# Specify download folder to Gluonnlp's vocab
data_dir = os.path.join(get_home_dir() , '''models''' )
vocabulary = _load_vocab(vocab_name , None , data_dir , cls=Vocab )
original_bort = nlp.model.BERTModel(
encoder , len(vocabulary ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=False , use_token_type_embed=False , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=False , use_decoder=False , )
original_bort.load_parameters(bort_checkpoint_path , cast_dtype=True , ignore_extra=True )
params = original_bort._collect_params_with_prefix()
# Build our config 🤗
hf_bort_config_json = {
'''architectures''': ['''BertForMaskedLM'''],
'''attention_probs_dropout_prob''': predefined_args['''dropout'''],
'''hidden_act''': '''gelu''',
'''hidden_dropout_prob''': predefined_args['''dropout'''],
'''hidden_size''': predefined_args['''embed_size'''],
'''initializer_range''': 0.02,
'''intermediate_size''': predefined_args['''hidden_size'''],
'''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
'''max_position_embeddings''': predefined_args['''max_length'''],
'''model_type''': '''bort''',
'''num_attention_heads''': predefined_args['''num_heads'''],
'''num_hidden_layers''': predefined_args['''num_layers'''],
'''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa
'''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa
'''vocab_size''': len(vocabulary ),
}
hf_bort_config = BertConfig.from_dict(hf_bort_config_json )
hf_bort_model = BertForMaskedLM(hf_bort_config )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
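# Both helpers below are applied per entry of this table: the Gluon and Transformers
# parameter shapes are asserted to match before any weight is copied over.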
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(mx_array ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(hf_param , gluon_param ):
shape_hf = hf_param.shape
gluon_param = to_torch(params[gluon_param] )
shape_gluon = gluon_param.shape
assert (
shape_hf == shape_gluon
), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
return gluon_param
hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' )
hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' )
hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' )
hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
    layer: BertLayer = hf_bort_model.bert.encoder.layer[i]
    # self attention
    self_attn: BertSelfAttention = layer.attention.self
    self_attn.key.bias.data = check_and_map_params(
    self_attn.key.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
    self_attn.key.weight.data = check_and_map_params(
    self_attn.key.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
    self_attn.query.bias.data = check_and_map_params(
    self_attn.query.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
    self_attn.query.weight.data = check_and_map_params(
    self_attn.query.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
    self_attn.value.bias.data = check_and_map_params(
    self_attn.value.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
    self_attn.value.weight.data = check_and_map_params(
    self_attn.value.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
    # self attention output
    self_output: BertSelfOutput = layer.attention.output
    self_output.dense.bias = check_and_map_params(
    self_output.dense.bias , f'''encoder.transformer_cells.{i}.proj.bias''' )
    self_output.dense.weight = check_and_map_params(
    self_output.dense.weight , f'''encoder.transformer_cells.{i}.proj.weight''' )
    self_output.LayerNorm.bias = check_and_map_params(
    self_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.layer_norm.beta''' )
    self_output.LayerNorm.weight = check_and_map_params(
    self_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
    # intermediate
    intermediate: BertIntermediate = layer.intermediate
    intermediate.dense.bias = check_and_map_params(
    intermediate.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
    intermediate.dense.weight = check_and_map_params(
    intermediate.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
    # output
    bert_output: BertOutput = layer.output
    bert_output.dense.bias = check_and_map_params(
    bert_output.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
    bert_output.dense.weight = check_and_map_params(
    bert_output.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
    bert_output.LayerNorm.bias = check_and_map_params(
    bert_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
    bert_output.LayerNorm.weight = check_and_map_params(
    bert_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
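# Note: the model was cast to fp16 above, so a small numerical drift against the
# fp32 Gluon reference is expected; the comparison below uses atol=1e-3 for that reason.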
tokenizer = RobertaTokenizer.from_pretrained('''roberta-base''' )
input_ids = tokenizer.encode_plus(SAMPLE_TEXT )['''input_ids''']
# Get gluon output
gluon_input_ids = mx.nd.array([input_ids] )
output_gluon = original_bort(inputs=gluon_input_ids , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(pytorch_dump_folder_path )
hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path )
hf_bort_model.eval()
hf_input_ids = tokenizer.encode_plus(SAMPLE_TEXT , return_tensors='''pt''' )
output_hf = hf_bort_model(**hf_input_ids )[0]
gluon_layer = output_gluon[0].asnumpy()
hf_layer = output_hf[0].detach().numpy()
max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer ) ).item()
success = np.allclose(gluon_layer , hf_layer , atol=1E-3 )
if success:
    print('''✔️ Both models output the same tensors''' )
else:
    print('''❌ Both models do **NOT** output the same tensors''' )
    print('''Absolute difference is:''' , max_absolute_diff )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path to the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 279 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
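# The hands above are ordered from weakest to strongest, so comparing indices into
# SORTED_HANDS is enough to predict Win/Tie/Loss in the random tests further down.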
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 1_4]),
('''2H 5D 3C AS 5S''', False, [1_4, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [1_4, 1_3, 1_2, 1_1, 1_0]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
TEST_TYPES = (
('''JH AH TH KH QH''', 2_3),
('''JH 9H TH KH QH''', 2_2),
('''JC KH JS JD JH''', 2_1),
('''KH KC 3S 3H 3D''', 2_0),
('''8C 9C 5C 3C TC''', 1_9),
('''JS QS 9H TS KH''', 1_8),
('''7C 7S KH 2H 7H''', 1_7),
('''3C KH 5D 5S KH''', 1_6),
('''QH 8H KD JH 8S''', 1_5),
('''2D 6D 9D TH 7D''', 1_4),
)
def generate_random_hand():
    """Pick two random hands from SORTED_HANDS and the expected comparison result."""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    # (play >= oppo) + (play > oppo) evaluates to 0, 1 or 2 -> Loss, Tie or Win,
    # which works because SORTED_HANDS is ordered from weakest to strongest
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands(number_of_hands: int = 100):
    """Generate a stream of random hand pairs for the parametrized tests below."""
    return (generate_random_hand() for _ in range(number_of_hands))
@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected) -> None:
    assert PokerHand(hand)._is_flush() == expected
@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected) -> None:
    assert PokerHand(hand)._is_straight() == expected
@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values) -> None:
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected) -> None:
    assert PokerHand(hand)._is_same_kind() == expected
@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected) -> None:
    assert PokerHand(hand)._hand_type == expected
@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected) -> None:
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected) -> None:
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
def test_hand_sorted():
    """Shuffling and re-sorting the hands must restore the SORTED_HANDS order."""
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    """Project Euler problem 54: count player-one wins over the supplied hands file."""
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    file_path = os.path.join(script_dir, "poker_hands.txt")
    with open(file_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
| 279 | 1 |
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose the rows of source_data into per-column lists of floats."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists
def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Min-max normalize every column; weight 0 means lower is better, 1 means higher is better."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists
def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-column scores for each row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Append a combined, weight-aware normalized score to every row of source_data."""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
| 14 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
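# enable_full_determinism() forces deterministic torch/CUDA kernels so the
# hard-coded expected slices in the tests below stay reproducible.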
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components(self ):
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=True , )
        scheduler = PNDMScheduler(skip_prk_steps=True )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self , device , seed=0 ):
        # create a dummy image and a matching mask
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((64, 64) )
        mask_image = Image.fromarray(np.uint8(image + 4 ) ).convert('RGB' ).resize((64, 64) )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': init_image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def test_stable_diffusion_inpaint(self ):
        device = 'cpu' # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_batch_single_identical(self ):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase ):
    def tearDown(self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self ):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench.npy' )
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id , safety_checker=None )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , output_type='np' , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self ):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench_fp16.npy' )
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id , torch_dtype=torch.float16 , safety_checker=None , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , output_type='np' , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self ):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        scheduler = PNDMScheduler.from_pretrained(model_id , subfolder='scheduler' )
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id , safety_checker=None , scheduler=scheduler , torch_dtype=torch.float16 , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0 )
        _ = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , num_inference_steps=2 , output_type='np' , )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 14 | 1 |
'''simple docstring'''
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
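# The keys above are fairseq parameter-name fragments; the values are the matching
# Transformers module paths, with `*` standing in for the encoder layer index.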
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk `key` down the HF module tree and copy `value` into the matching parameter."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group"
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
            if not is_used:
                unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")
    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None
    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    """Build an output projection layer whose weights are tied to the embedding matrix."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    """Copy/paste/tweak the fairseq checkpoint into the Transformers design (config-dict key names below are a reconstruction)."""
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path, add_adapter=add_adapter, adapter_stride=adapter_stride, adapter_kernel_size=adapter_kernel_size, use_auth_token=True, output_hidden_size=encoder_output_dim, )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)
    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        }, )
    model = model[0].eval()
    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    tokenizer = MBartaaTokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_yaml_path''', default=None, type=str, help='''Path to yaml file of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-xls-r-1b''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/mbart-large-50-one-to-many-mmt''',
type=str,
help='''Path to hf decoder checkpoint config''',
)
parser.add_argument('''--add_adapter''', default=True, type=bool, help='''whether to add model adapter layers''')
parser.add_argument('''--adapter_stride''', default=2, type=int, help='''stride of adapter layers''')
parser.add_argument('''--adapter_kernel_size''', default=3, type=int, help='''kernel size of adapter layers''')
parser.add_argument('''--encoder_output_dim''', default=1024, type=int, help='''encoder output dim''')
parser.add_argument('''--start_token_id''', default=250004, type=int, help='''`decoder_start_token_id` of model config''')
args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 97 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
tokenizer_class = BioGptTokenizer
test_rust_tokenizer = False
def setUp(self ) -> None:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
vocab_tokens = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
]
vocab = dict(zip(vocab_tokens , range(len(vocab_tokens ) ) ) )
merges = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(vocab ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(merges ) )
def get_input_output_texts(self , tokenizer ):
lowerCAmelCase_ :List[Any] = """lower newer"""
lowerCAmelCase_ :Tuple = """lower newer"""
return input_text, output_text
def test_full_tokenizer(self ):
tokenizer = BioGptTokenizer(self.vocab_file , self.merges_file )
text = """lower"""
bpe_tokens = ["""low""", """er</w>"""]
tokens = tokenizer.tokenize(text )
self.assertListEqual(tokens , bpe_tokens )
input_tokens = tokens + ["""<unk>"""]
input_bpe_tokens = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
@slow
def test_sequence_builders(self ):
tokenizer = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
text = tokenizer.encode("""sequence builders""" , add_special_tokens=False )
text_a = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False )
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 84 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'configuration_gpt_neo': ['GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoConfig', 'GPTNeoOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_gpt_neo'] = [
'GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoForCausalLM',
'GPTNeoForQuestionAnswering',
'GPTNeoForSequenceClassification',
'GPTNeoForTokenClassification',
'GPTNeoModel',
'GPTNeoPreTrainedModel',
'load_tf_weights_in_gpt_neo',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_flax_gpt_neo'] = [
'FlaxGPTNeoForCausalLM',
'FlaxGPTNeoModel',
'FlaxGPTNeoPreTrainedModel',
]
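# backend-specific symbols are only added to _import_structure when the backend
# import succeeds; the _LazyModule at the bottom defers the real imports until
# attribute access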
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 359 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
_import_structure = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_mt5'] = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_mt5'] = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_flax_mt5'] = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
module_spec=__spec__,
)
| 119 | 0 |
from ..utils import DummyObject, requires_backends
class _UpperCamelCase ( metaclass=__A ):
'''simple docstring'''
lowerCamelCase__ =['speech']
def __init__( self : List[Any] , *a : Tuple , **a : Union[str, Any] ) -> Tuple:
"""simple docstring"""
requires_backends(self , ["speech"] )
class _UpperCamelCase ( metaclass=__A ):
'''simple docstring'''
lowerCamelCase__ =['speech']
def __init__( self : Optional[int] , *a : int , **a : List[str] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["speech"] ) | 76 |
'''simple docstring'''
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray , pt1: np.ndarray , pt2: np.ndarray , rows: int , cols: int ) -> np.ndarray:
    """Rotate/shear `img` with the affine transform defined by the point pairs pt1 -> pt2."""
    rotation_matrix = cva.getAffineTransform(pt1 , pt2 )
    return cva.warpAffine(img , rotation_matrix , (rows, cols) )
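# cva.getAffineTransform maps three source points onto three destination points and
# returns a 2x3 matrix; cva.warpAffine then applies that matrix to the whole image.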
if __name__ == "__main__":
# read original image
__lowerCAmelCase = cva.imread(
str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''')
)
# turn image in gray scale value
__lowerCAmelCase = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
img_rows, img_cols = gray_img.shape
# set different points to rotate image
pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
# add all rotated images in a list (the point pairing below is a best-effort reconstruction)
images = [
gray_img,
get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
]
# plot different image rotations
fig = plt.figure(1)
titles = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3''']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''')
plt.title(titles[i])
plt.axis('''off''')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 89 | 0 |
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path , openai_config_file , pytorch_dump_folder_path ):
    """Convert an OpenAI GPT checkpoint to a PyTorch checkpoint."""
    # Construct the model from the (optional) config file
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file )
    model = OpenAIGPTModel(config )
    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model , config , openai_checkpoint_folder_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(f"""Save configuration file to {pytorch_config_dump_path}""" )
    with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--openai_checkpoint_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the TensorFlow checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--openai_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
| 367 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 291 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list ) -> bool:
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:] )
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        """height""",
        """width""",
        """cross_attention_kwargs""",
        """negative_prompt_embeds""",
        """prompt_embeds""",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"""num_images_per_prompt"""}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    test_cpu_offload = True
    @property
    def dummy_image(self ):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self ):
        # norm_num_groups / mid_block_type / only_cross_attention values below are a reconstruction
        torch.manual_seed(0)
        model = UNetaDConditionModel(
            act_fn='gelu', attention_head_dim=8, norm_num_groups=None, block_out_channels=[32, 32, 64, 64], time_cond_proj_dim=160, conv_in_kernel=1, conv_out_kernel=1, cross_attention_dim=32, down_block_types=(
                'KDownBlock2D',
                'KCrossAttnDownBlock2D',
                'KCrossAttnDownBlock2D',
                'KCrossAttnDownBlock2D',
            ), in_channels=8, mid_block_type=None, only_cross_attention=False, out_channels=5, resnet_time_scale_shift='scale_shift', time_embedding_type='fourier', timestep_post_act='gelu', up_block_types=('KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KUpBlock2D'), )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64], in_channels=3, out_channels=3, down_block_types=[
                'DownEncoderBlock2D',
                'DownEncoderBlock2D',
                'DownEncoderBlock2D',
                'DownEncoderBlock2D',
            ], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
        scheduler = EulerDiscreteScheduler(prediction_type='sample')
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='quick_gelu', projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': model.eval(),
            'vae': vae.eval(),
            'scheduler': scheduler,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'image': self.dummy_image.cpu(),
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
    def test_inference(self):
        """simple docstring"""
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 2_56, 2_56, 3))
        expected_slice = np.array(
            [0.4_7_2_2_2_4_1_2, 0.4_1_9_2_1_6_3_3, 0.4_4_7_1_7_4_3_4, 0.4_6_8_7_4_1_9_2, 0.4_2_5_8_8_2_5_8, 0.4_6_1_5_0_7_2_6, 0.4_6_7_7_5_3_4, 0.4_5_5_8_3_8_3_2, 0.4_8_5_7_9_0_5_5])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1E-3)
    def test_attention_slicing_forward_pass(self):
        """simple docstring"""
        super().test_attention_slicing_forward_pass(expected_max_diff=7E-3)
    def test_cpu_offload_forward_pass(self):
        """simple docstring"""
        super().test_cpu_offload_forward_pass(expected_max_diff=3E-3)
    def test_dict_tuple_outputs_equivalent(self):
        """simple docstring"""
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3)
    def test_inference_batch_single_identical(self):
        """simple docstring"""
        super().test_inference_batch_single_identical(expected_max_diff=7E-3)
    def test_pt_np_pil_outputs_equivalent(self):
        """simple docstring"""
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3)
    def test_save_load_local(self):
        """simple docstring"""
        super().test_save_load_local(expected_max_difference=3E-3)
    def test_save_load_optional_components(self):
        """simple docstring"""
        super().test_save_load_optional_components(expected_max_difference=3E-3)
    def test_karras_schedulers_shape(self):
        """simple docstring"""
        skip_schedulers = [
            'DDIMScheduler',
            'DDPMScheduler',
            'PNDMScheduler',
            'HeunDiscreteScheduler',
            'EulerAncestralDiscreteScheduler',
            'KDPM2DiscreteScheduler',
            'KDPM2AncestralDiscreteScheduler',
            'DPMSolverSDEScheduler',
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs['num_inference_steps'] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # these schedulers are not supported by this pipeline, so skip them
                continue
            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)
        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        """simple docstring"""
        super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_latent_upscaler_fp16(self):
        """simple docstring"""
        generator = torch.manual_seed(33)
        pipe = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4', torch_dtype=torch.floataa)
        pipe.to('cuda')
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            'stabilityai/sd-x2-latent-upscaler', torch_dtype=torch.floataa)
        upscaler.to('cuda')
        prompt = 'a photo of an astronaut high resolution, unreal engine, ultra realistic'
        low_res_latents = pipe(prompt, generator=generator, output_type='latent').images
        image = upscaler(
            prompt=prompt, image=low_res_latents, num_inference_steps=20, guidance_scale=0, generator=generator, output_type='np', ).images[0]
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy')
        assert np.abs((expected_image - image).mean()) < 5E-2
    def test_latent_upscaler_fp16_image(self):
        """simple docstring"""
        generator = torch.manual_seed(33)
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            'stabilityai/sd-x2-latent-upscaler', torch_dtype=torch.floataa)
        upscaler.to('cuda')
        prompt = 'the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'
        image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png')
        upscaled_image = upscaler(
            prompt=prompt, image=image, num_inference_steps=20, guidance_scale=0, generator=generator, output_type='np', ).images[0]
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy')
        assert np.abs((expected_image - upscaled_image).max()) < 5E-2
| 21 |
def sum_of_series(first_term: float, common_diff: float, num_of_terms: int) -> float:
    """Return the sum of an arithmetic series.

    >>> sum_of_series(1, 1, 10)
    55.0
    """
    # formula for the sum of an arithmetic series
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 1 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(model, bnb_quantization_config, weights_location=None, device_map=None, no_split_module_classes=None, max_memory=None, offload_folder=None, offload_state_dict=False, ):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
            ' make sure you have the latest version of `bitsandbytes` installed.' )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
            ' make sure you have the latest version of `bitsandbytes` installed.' )
    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fpaa_modules is None:
        bnb_quantization_config.keep_in_fpaa_modules = []
    keep_in_fpaa_modules = bnb_quantization_config.keep_in_fpaa_modules
    modules_to_not_convert.extend(keep_in_fpaa_modules)
    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'It is not recommended to quantize a loaded model. '
'The model should be instantiated under the `init_empty_weights` context manager.' )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules):
                param.to(torch.floataa)
                if param.dtype != torch.floataa:
                    name = name.replace('.weight', '').replace('.bias', '')
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.floataa)
            elif torch.is_floating_point(param):
                param.to(dtype)
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
        logger.info(
            F'The model device type is {model_device.type}. However, cuda is needed for quantization. '
            'We move the model to cuda.' )
return model
elif weights_location is None:
raise RuntimeError(
F'`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ' )
else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        device_map = get_quantized_model_device_map(
            model, bnb_quantization_config, device_map, max_memory=max_memory, no_split_module_classes=no_split_module_classes, )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values()) for x in ['cpu', 'disk'])
        load_checkpoint_in_model(
            model, weights_location, device_map, dtype=bnb_quantization_config.torch_dtype, offload_folder=offload_folder, offload_state_dict=offload_state_dict, keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules, offload_8bit_bnb=load_in_8bit and offload, )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
def get_quantized_model_device_map(model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {'': torch.cuda.current_device()}
        else:
            raise RuntimeError('No GPU found. A GPU is needed for quantization.')
        logger.info('The device_map was not initialized. ' "Setting device_map to `{'':torch.cuda.current_device()}`.")
    if isinstance(device_map, str):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
'\'sequential\'.' )
        special_dtypes = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
        kwargs = {}
        kwargs['special_dtypes'] = special_dtypes
        kwargs['no_split_module_classes'] = no_split_module_classes
        kwargs['dtype'] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model, low_zero=(device_map == 'balanced_low_0'), max_memory=max_memory, **kwargs, )
        kwargs['max_memory'] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)
    if isinstance(device_map, dict):
        # check that we don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        '\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' )
                else:
                    logger.info(
                        'Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' )
del device_map_without_some_modules
return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name)
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None, ):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = '.'.join(current_key_name)
            proceed = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
                    proceed = False
break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear`` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features, module.out_features, module.bias is not None, has_fp16_weights=False, threshold=bnb_quantization_config.llm_int8_threshold, )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features, module.out_features, module.bias is not None, bnb_quantization_config.bnb_4bit_compute_dtype, compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant, quant_type=bnb_quantization_config.bnb_4bit_quant_type, )
                else:
                    raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' )
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
if len(list(module.children() ) ) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name)
            has_been_replaced = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, 'base_model_prefix'):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = ['.weight', '.bias']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, '')
        filtered_module_names.append(name)
    return filtered_module_names
def has_4bit_bnb_layers(model):
    # Check whether the model contains any `bnb.nn.Linear4bit` layers
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False
def get_parameter_device(parameter):
return next(parameter.parameters() ).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fpaa_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fpaa_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split('.')
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(F'{module} has no attribute {split}.')
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], 'SCB'):
            offload_weight(
                module._parameters[tensor_name].SCB, param_name.replace('weight', 'SCB'), offload_folder, index=offload_index, )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fpaa_statistics, param_name.replace('weight', 'SCB'), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, 'meta', dtype=new_dtype, value=torch.empty(*param.size()))
| 225 |
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        config = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """simple docstring"""
    test_head_masking = True
    all_model_classes = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
)
if is_flax_available()
else ()
)
    def setUp(self):
        '''simple docstring'''
        self.model_tester = FlaxBertModelTester(self)
@slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        model = FlaxBertModel.from_pretrained('bert-base-cased')
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
| 225 | 1 |
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """simple docstring"""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """simple docstring"""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """simple docstring"""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """simple docstring"""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
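

if __name__ == "__main__":
    # Illustrative demo (editorial addition, not part of the original module; the
    # sample data is an assumption). Weight 0 rewards low values and weight 1
    # rewards high values; the combined score is appended to each row.
    vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
    print(procentual_proximity(vehicles, [0, 0, 1]))
    # -> [[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333333333333335]]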
| 14 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = """
import os
"""
IMPORT_IN_FUNCTION = """
def foo():
import os
return False
"""
DEEPLY_NESTED_IMPORT = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
TOP_LEVEL_TRY_IMPORT = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
TRY_IMPORT_IN_FUNCTION = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
MULTIPLE_EXCEPTS_IMPORT = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
EXCEPT_AS_IMPORT = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
GENERIC_EXCEPT_IMPORT = """
import os
try:
import bar
except:
raise ValueError()
"""
MULTILINE_TRY_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
MULTILINE_BOTH_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('case', CASES)
def test_import_parsing(tmp_path, case):
    """simple docstring"""
    tmp_file_path = os.path.join(tmp_path, 'test_file.py')
    with open(tmp_file_path, 'w') as _tmp_file:
        _tmp_file.write(case)
    parsed_imports = get_imports(tmp_file_path)
assert parsed_imports == ["os"]
| 14 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
"tokenization_biogpt": ["BioGptTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
"BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BioGptForCausalLM",
"BioGptForTokenClassification",
"BioGptForSequenceClassification",
"BioGptModel",
"BioGptPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
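# Editorial note: with the `_LazyModule` indirection above, a statement such as
# `from transformers.models.biogpt import BioGptModel` resolves lazily -- the
# heavy `modeling_biogpt` submodule is only imported when the attribute is
# first accessed, which keeps the top-level `import transformers` fast.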
| 371 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    root_marker = ''
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__( self, lowercase_ = "", lowercase_ = None, lowercase_ = None, **lowercase_ ) -> str:
super().__init__(self, **lowercase_ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
snake_case = fsspec.open(
lowercase_, mode='rb', protocol=lowercase_, compression=self.compression, client_kwargs={
'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459
'trust_env': True, # Enable reading proxy env variables.
**(target_options or {}).pop('client_kwargs', {} ), # To avoid issues if it was already passed.
}, **(target_options or {}), )
snake_case = os.path.basename(self.file.path.split('::' )[0] )
snake_case = (
self.compressed_name[: self.compressed_name.rindex('.' )]
if '.' in self.compressed_name
else self.compressed_name
)
snake_case = None
@classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip('/')
    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), 'name': self.uncompressed_name}
            self.dir_cache = {f['name']: f}
    def cat(self, path: str):
        return self.file.open().read()
def _lowerCamelCase ( self, lowercase_, lowercase_ = "rb", lowercase_=None, lowercase_=True, lowercase_=None, **lowercase_, ) -> Any:
snake_case = self._strip_protocol(lowercase_ )
if mode != "rb":
raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
return self.file.open()
class Bza2FileSystem(BaseCompressedFileFileSystem):
    protocol = 'bz2'
    compression = 'bz2'
    extension = '.bz2'
class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = 'gzip'
    compression = 'gzip'
    extension = '.gz'
class LzaFileSystem(BaseCompressedFileFileSystem):
    protocol = 'lz4'
    compression = 'lz4'
    extension = '.lz4'
class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = 'xz'
    compression = 'xz'
    extension = '.xz'
class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = 'zstd'
    compression = 'zstd'
    extension = '.zst'
def __init__( self, lowercase_, lowercase_ = "rb", lowercase_ = None, lowercase_ = None, lowercase_ = DEFAULT_BLOCK_SIZE, **lowercase_, ) -> Union[str, Any]:
super().__init__(
fo=lowercase_, mode=lowercase_, target_protocol=lowercase_, target_options=lowercase_, block_size=lowercase_, **lowercase_, )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__
        class WrappedFile:
            def __init__(self, file_):
                self._file = file_
def __enter__( self ) -> Dict:
self._file.__enter__()
return self
def __exit__( self, *lowercase_, **lowercase_ ) -> Dict:
self._file.__exit__(*lowercase_, **lowercase_ )
def __iter__( self ) -> List[str]:
return iter(self._file )
            def __next__(self):
                return next(self._file)
def __getattr__( self, lowercase_ ) -> List[Any]:
return getattr(self._file, lowercase_ )
        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
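
# Usage sketch (editorial addition; the file path is an assumption): each
# filesystem in this module exposes exactly one decompressed file, named after
# the compressed file with its compression extension stripped, e.g.
#   fs = GzipFileSystem(fo="data/file.txt.gz")
#   fs.cat("file.txt")  # -> decompressed bytes of the single archived file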
| 332 | 0 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"
    def align_with_features(self, features):
        """simple docstring"""
        if self.label_column not in features:
            raise ValueError(f'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] , ClassLabel):
            raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template
    @property
    def column_mapping(self) -> Dict[str, str]:
        """simple docstring"""
return {
self.image_column: "image",
self.label_column: "labels",
}
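
# Usage sketch (editorial addition, not part of the original module; the
# feature names below are assumptions):
#   features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
#   task = ImageClassification().align_with_features(features)
#   task.column_mapping  # -> {"image": "image", "labels": "labels"}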
| 72 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000 , 1000 ) for i in range(10 )]
    r = randint(-5000 , 5000 )
    return (arr, r)
dataset = make_dataset()
def triplet_sum1(arr: list[int] , target: int) -> tuple[int, ...]:
    for triplet in permutations(arr , 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)
def triplet_sum2(arr: list[int] , target: int) -> tuple[int, int, int]:
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def solution_times() -> tuple[float, float]:
    setup_code = '\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n'
    test_code1 = '\ntriplet_sum1(*dataset)\n'
    test_code2 = '\ntriplet_sum2(*dataset)\n'
    times1 = repeat(setup=setup_code , stmt=test_code1 , repeat=5 , number=10000)
    times2 = repeat(setup=setup_code , stmt=test_code2 , repeat=5 , number=10000)
    return (min(times1), min(times2))
if __name__ == "__main__":
from doctest import testmod
testmod()
    times = solution_times()
print(F"""The time for naive implementation is {times[0]}.""")
print(F"""The time for optimized implementation is {times[1]}.""")
| 119 | 0 |
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar('DatasetType', Dataset, IterableDataset)
def interleave_datasets(datasets: List[DatasetType] , probabilities: Optional[List[float]] = None , seed: Optional[int] = None , info: Optional[DatasetInfo] = None , split: Optional[NamedSplit] = None , stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType:
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("Unable to interleave an empty list of datasets." )
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset , (Dataset, IterableDataset)):
            if isinstance(dataset , (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
                        "is an empty dataset dictionary." )
                raise ValueError(
                    f"""Dataset at position {i} has at least one split: {list(dataset)}\n"""
                    f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']""" )
            raise ValueError(
                f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.""" )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset , Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset , dataset_type):
            raise ValueError(
                f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
    else:
        return _interleave_iterable_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
def concatenate_datasets(dsets: List[DatasetType] , info: Optional[DatasetInfo] = None , split: Optional[NamedSplit] = None , axis: int = 0 , ) -> DatasetType:
'''simple docstring'''
if not dsets:
raise ValueError("Unable to concatenate an empty list of datasets." )
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset , (Dataset, IterableDataset)):
            if isinstance(dataset , (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
                        "is an empty dataset dictionary." )
                raise ValueError(
                    f"""Dataset at position {i} has at least one split: {list(dataset)}\n"""
                    f"""Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset))}']""" )
            raise ValueError(
                f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.""" )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset , Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset , dataset_type):
            raise ValueError(
                f"""Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets , info=info , split=split , axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets , info=info , split=split , axis=axis)
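

if __name__ == "__main__":
    # Minimal usage sketch (editorial addition, not part of the original module;
    # the sample data is an assumption, and the relative imports above prevent
    # running this file directly as a script). With no `probabilities`,
    # interleaving alternates over the datasets; "all_exhausted" keeps going
    # until the longest dataset is exhausted.
    d1 = Dataset.from_dict({"a": [0, 1, 2]})
    d2 = Dataset.from_dict({"a": [10, 11, 12]})
    mixed = interleave_datasets([d1, d2], stopping_strategy="all_exhausted")
    print(mixed["a"])  # expected: [0, 10, 1, 11, 2, 12]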
| 106 |
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase , ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()
    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85] ) , ) )
    def test_exact_match_kwarg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool(text="hey")
        resulting_tensor = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85] ) , ) )
| 106 | 1 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
__A = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
"""simple docstring"""
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    '''simple docstring'''
    dataset = None
    config_name = None
    def test_dataset_info_available(self, dataset, config_name):
        '''simple docstring'''
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset , cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path , dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir , config_name=config_name , hash=dataset_module.hash , )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep , "/"),
                    config.DATASET_INFO_FILENAME,
                ])
            dataset_info_path = cached_path(dataset_info_url , cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    """simple docstring"""
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp" ) / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia" , cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir , config_name="20220301.frr" , hash=dataset_module.hash , )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset(tmp_path):
    """simple docstring"""
    dataset_module = dataset_module_factory("wikipedia" , cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path , dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path , config_name="20220301.frr" , hash=dataset_module.hash , )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds , IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"] , IterableDataset)
    assert next(iter(ds["train"]))
| 10 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class TextaTextGenerationPipelineTests(unittest.TestCase):
    '''simple docstring'''
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        """simple docstring"""
        generator = TextaTextGenerationPipeline(model=model , tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]
    def run_pipeline_test(self, generator, _):
        """simple docstring"""
        outputs = generator("""Something there""")
        self.assertEqual(outputs , [{"""generated_text""": ANY(str)}])
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
lowerCamelCase = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_a )
self.assertEqual(
_a , [
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] , )
        outputs = generator(
            ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=True)
        self.assertEqual(
            outputs , [
                [{"""generated_text""": ANY(str)}, {"""generated_text""": ANY(str)}],
                [{"""generated_text""": ANY(str)}, {"""generated_text""": ANY(str)}],
            ] , )
        with self.assertRaises(TypeError):
            generator(4)
@require_torch
    def test_small_model_pt(self):
        """simple docstring"""
        generator = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""")
        # do_sample=False necessary for reproducibility
        outputs = generator("""Something there""" , do_sample=False)
        self.assertEqual(outputs , [{"""generated_text""": """"""}])
        num_return_sequences = 3
        outputs = generator(
            """Something there""" , num_return_sequences=num_return_sequences , num_beams=num_return_sequences , )
        target_outputs = [
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """"""},
]
        self.assertEqual(outputs , target_outputs)
        outputs = generator("""This is a test""" , do_sample=True , num_return_sequences=2 , return_tensors=True)
        self.assertEqual(
            outputs , [
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
] , )
        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = """<pad>"""
        outputs = generator(
            ["""This is a test""", """This is a second test"""] , do_sample=True , num_return_sequences=2 , batch_size=2 , return_tensors=True , )
        self.assertEqual(
            outputs , [
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
] , )
@require_tf
    def test_small_model_tf(self):
        """simple docstring"""
        generator = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""")
        # do_sample=False necessary for reproducibility
        outputs = generator("""Something there""" , do_sample=False)
        self.assertEqual(outputs , [{"""generated_text""": """"""}])
| 291 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 363 |
"""simple docstring"""
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"""{file}_{class_name}_{test_name}"""
    done_test[_id] += 1
    with open(file , '''r''' ) as f:
        lines = f.readlines()
    class_regex = f"""class {class_name}("""
    test_regex = f"""{4 * " "}def {test_name}("""
    line_begin_regex = f"""{8 * " "}{correct_line.split()[0]}"""
    another_line_begin_regex = f"""{16 * " "}{correct_line.split()[0]}"""
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1
            if count == done_test[_id]:
                in_line = True
        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"""{spaces * " "}{correct_line}""" )
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)
    with open(file , '''w''' ) as f:
        for line in new_lines:
            f.write(line)
def main(correct, fail=None):
    if fail is not None:
        with open(fail , '''r''' ) as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct , '''r''' ) as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(''';''' )
        if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
            overwrite_file(file , class_name , test_name , correct_line , done_tests)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
    args = parser.parse_args()
main(args.correct_filename, args.fail_filename)
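# Editorial note on the expected input format: each line of --correct_filename
# should follow "file;class_name;test_name;correct_line", matching the
# line.split(';') in main() above, e.g. (hypothetical path and slice):
# tests/models/bert/test_modeling_bert.py;BertModelTest;test_inference;expected_slice = torch.tensor([...])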
| 73 | 0 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atomaa_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
for rt in rc.restypes:
SCREAMING_SNAKE_CASE_ = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
SCREAMING_SNAKE_CASE_ = {name: i for i, name in enumerate(__UpperCAmelCase )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
SCREAMING_SNAKE_CASE_ = torch.tensor(
__UpperCAmelCase , dtype=torch.intaa , device=protein['aatype'].device , )
SCREAMING_SNAKE_CASE_ = torch.tensor(
__UpperCAmelCase , dtype=torch.intaa , device=protein['aatype'].device , )
SCREAMING_SNAKE_CASE_ = torch.tensor(
__UpperCAmelCase , dtype=torch.floataa , device=protein['aatype'].device , )
SCREAMING_SNAKE_CASE_ = protein['aatype'].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
SCREAMING_SNAKE_CASE_ = restype_atomaa_to_atomaa[protein_aatype]
SCREAMING_SNAKE_CASE_ = restype_atomaa_mask[protein_aatype]
SCREAMING_SNAKE_CASE_ = residx_atomaa_mask
SCREAMING_SNAKE_CASE_ = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
SCREAMING_SNAKE_CASE_ = restype_atomaa_to_atomaa[protein_aatype]
SCREAMING_SNAKE_CASE_ = residx_atomaa_to_atomaa.long()
# create the corresponding mask
SCREAMING_SNAKE_CASE_ = torch.zeros([21, 37] , dtype=torch.floataa , device=protein['aatype'].device )
for restype, restype_letter in enumerate(rc.restypes ):
SCREAMING_SNAKE_CASE_ = rc.restype_atoa[restype_letter]
SCREAMING_SNAKE_CASE_ = rc.residue_atoms[restype_name]
for atom_name in atom_names:
SCREAMING_SNAKE_CASE_ = rc.atom_order[atom_name]
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = restype_atomaa_mask[protein_aatype]
SCREAMING_SNAKE_CASE_ = residx_atomaa_mask
return protein
def make_atomaa_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n , device=batch['aatype'].device) , batch , np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t) , make_atomaa_masks(batch))
    return out

| 225 |
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub('<n>' , '' , x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))

| 225 | 1 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
'''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = '''gptj'''
    attribute_map = {
        '''max_position_embeddings''': '''n_positions''',
        '''hidden_size''': '''n_embd''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
    def __init__(self, vocab_size=5_0400, n_positions=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1E-5, initializer_range=0.02, use_cache=True, bos_token_id=5_0256, eos_token_id=5_0256, tie_word_embeddings=False, **kwargs, ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs)
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the inputs the way they appear in forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
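# Usage sketch (illustrative only; `tokenizer` is a hypothetical GPT-J tokenizer
# instance, and the full export flow lives in transformers.onnx):
#   onnx_config = GPTJOnnxConfig(GPTJConfig(), task="default", use_past=True)
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH)
#   list(dummy.keys())  # ["input_ids", "past_key_values", "attention_mask"]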
| 211 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak a timm ViT/DeiT checkpoint into our ViT structure."""
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1_000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1_536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2_304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1_024
            config.intermediate_size = 4_096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1_280
            config.intermediate_size = 5_120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_patch16_224''',
type=str,
help='''Name of the ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
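    # Example invocation (illustrative only; the script filename is an
    # assumption, and the model name must exist in timm):
    #   python convert_vit_timm_to_pytorch.py --vit_name vit_base_patch16_224 \
    #       --pytorch_dump_folder_path ./vit-base-patch16-224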
| 211 | 1 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    # If the introduction or the conclusion of the list changes, the prompts may need to be updated.
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version() -> packaging.version.Version:
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
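# Example invocations (illustrative only; the script path follows the usual
# transformers repo convention):
#   python utils/release.py                  # minor release: bump versions everywhere
#   python utils/release.py --patch          # patch release: skips the examples
#   python utils/release.py --post_release   # move back to a .dev0 version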
| 57 |
"""simple docstring"""
import pprint
import requests
_lowercase : Optional[Any] = 'https://zenquotes.io/api'
def lowercase__ ( ):
return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def lowercase__ ( ):
return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
if __name__ == "__main__":
_lowercase : int = random_quotes()
pprint.pprint(response)
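    # Sample response shape (illustrative only; actual content varies per request):
    #   [{"q": "<quote text>", "a": "<author>", "h": "<HTML blockquote>"}]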
| 332 | 0 |
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """
    Each line of `data_file` holds a "base,exponent" pair; return the 1-based
    line number of the pair with the greatest value base**exponent, compared
    via exponent * log10(base) to avoid computing huge numbers.
    """
    largest = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
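    # Note (illustrative): base_exp.txt is the Project Euler problem 99 data
    # file, with lines such as "519432,525806".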
| 371 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs) -> int:
        """Writes the dataset as JSON to a binary file handle, batch by batch."""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)
        return written
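# Usage sketch (illustrative only; file names are hypothetical):
#   ds = JsonDatasetReader("data.jsonl").read()
#   JsonDatasetWriter(ds, "out.jsonl.gz", compression="gzip").write()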
| 198 | 0 |
"""simple docstring"""
# flake8: noqa
# Lint as: python3
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 106 |
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Returns everything needed to perform basic training."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
            preds = outputs.logits.argmax(dim=-1)
            metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
            preds = outputs.logits.argmax(dim=-1)
            references = batch["labels"]
            preds, references = accelerator.gather_for_metrics((preds, references))
            metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
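# Launch sketch (illustrative only; flags follow the usual accelerate CLI):
#   accelerate launch --num_processes 2 test_metrics.py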
| 106 | 1 |
def max_product_subarray(numbers: list[int]) -> int:
    """
    Returns the maximum product obtainable from a contiguous subarray of the
    given list of integers.
    """
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            # a negative factor swaps the roles of the running max and min
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
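# Worked examples (illustrative only):
#   max_product_subarray([2, 3, -2, 4])  # -> 6, from the subarray [2, 3]
#   max_product_subarray([-2, 0, -1])    # -> 0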
| 29 |
from __future__ import annotations
graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph is implemented as a dictionary of adjacency lists; the source
        vertex has to be defined upon initialization."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Run breadth first search from the source vertex, filling `parent`."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the shortest path as "v1->v2->...->vn"; raises ValueError if
        `target_vertex` was not reached by the search."""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    g = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
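    # Expected output (illustrative): "G->C->A->B->D", then "G", then a
    # ValueError for "Foo", since it is not reachable from the source vertex.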
| 29 | 1 |
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")
    return name


def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            # split the fused qkv projection into separate query/key/value entries
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict


def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
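    # Example invocation (illustrative only; the script filename is an assumption):
    #   python convert_vit_mae_to_pytorch.py \
    #       --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
    #       --pytorch_dump_folder_path ./vit-mae-base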
| 306 |
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Returns a list of valid successor nodes inside the grid."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Retraces the path from `node` back to the start via parent pointers."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            # each search hunts the other's current frontier node
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
print("""Bidirectional BFS computation time : """, bd_bfs_time)
| 73 | 0 |
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class EncodecFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs an EnCodec feature extractor: pads/chunks raw audio into
    `input_values` and a matching `padding_mask`.
    """

    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24000,
        padding_value: float = 0.0,
        chunk_length_s: Optional[float] = None,
        overlap: Optional[float] = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    # This is a property because you might want to change chunk_length_s on the fly
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    # This is a property because you might want to change chunk_length_s on the fly
    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))
    def __call__(
        self,
        raw_audio,
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: bool = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for example in raw_audio:
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)

        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
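# Usage sketch (illustrative only; shapes follow from the transpose above):
#   fe = EncodecFeatureExtractor(feature_size=1, sampling_rate=24000)
#   out = fe([np.zeros(24000), np.zeros(18000)], sampling_rate=24000, return_tensors="np")
#   out["input_values"].shape  # (2, 1, 24000): batch, channels, padded samples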
| 94 |
from math import factorial


def solution(num: int = 100) -> int:
    """Returns the sum of the digits in the number num!"""
    return sum(map(int, str(factorial(num))))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
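# Worked example (illustrative only): solution(10) -> 27,
# since 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.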
| 94 | 1 |
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
lowercase_ = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
lowercase_ = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
lowercase_ = re.compile(R"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
lowercase_ = re.compile(R"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
lowercase_ = re.compile(R"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def camel_case_split(identifier):
    """Split a camel-cased name into words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def get_frameworks_table():
    """Generates a dataframe containing the supported auto classes for each model type."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    """Update the table of model class to (pipeline tag, auto class) from the auto mappings."""
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})
    return table


def update_metadata(token, commit_sha):
    """Update the metadata for the Transformers repo."""
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )


def check_pipeline_tags():
    """Check that the pipeline tags constant above covers every task the pipelines support."""
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
lowercase_ = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
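        # Example invocations (illustrative only; the script path follows the
        # comment near the top of this file):
        #   python utils/update_metadata.py --check-only
        #   python utils/update_metadata.py --token <hf_token> --commit_sha <sha>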
| 211 |
from typing import Dict, Optional
import numpy as np
import datasets
lowercase_ = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
lowercase_ = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
lowercase_ = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def lowerCAmelCase (__A , __A , __A , __A , __A = None , __A = False , ):
"""simple docstring"""
if label_map is not None:
for old_id, new_id in label_map.items():
_a = new_id
# turn into Numpy arrays
_a = np.array(__A)
_a = np.array(__A)
if reduce_labels:
_a = 255
_a = label - 1
_a = 255
_a = label != ignore_index
_a = np.not_equal(__A , __A)
_a = pred_label[mask]
_a = np.array(__A)[mask]
_a = pred_label[pred_label == label]
_a = np.histogram(__A , bins=__A , range=(0, num_labels - 1))[0]
_a = np.histogram(__A , bins=__A , range=(0, num_labels - 1))[0]
_a = np.histogram(__A , bins=__A , range=(0, num_labels - 1))[0]
_a = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
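# A minimal sketch of the histogram trick above, with assumed toy inputs
# (illustration only, not part of the metric API):
#
#     >>> pred = np.array([[0, 1], [1, 1]])
#     >>> label = np.array([[0, 1], [0, 1]])
#     >>> np.histogram(pred[pred == label], bins=2, range=(0, 1))[0]  # area_intersect
#     array([1, 2])
#
# Here area_pred_label = [1, 3] and area_label = [2, 2], so the per-class
# union is [1, 3] + [2, 2] - [1, 2] = [2, 3] and the per-class IoU is
# [0.5, 0.667].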
def lowerCAmelCase (__A , __A , __A , __A , __A = None , __A = False , ):
"""simple docstring"""
_a = np.zeros((num_labels,) , dtype=np.floataa)
_a = np.zeros((num_labels,) , dtype=np.floataa)
_a = np.zeros((num_labels,) , dtype=np.floataa)
_a = np.zeros((num_labels,) , dtype=np.floataa)
for result, gt_seg_map in zip(__A , __A):
_a , _a , _a , _a = intersect_and_union(
__A , __A , __A , __A , __A , __A)
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def lowerCAmelCase (__A , __A , __A , __A , __A = None , __A = None , __A = False , ):
"""simple docstring"""
_a , _a , _a , _a = total_intersect_and_union(
__A , __A , __A , __A , __A , __A)
# compute metrics
_a = {}
_a = total_area_intersect.sum() / total_area_label.sum()
_a = total_area_intersect / total_area_union
_a = total_area_intersect / total_area_label
_a = np.nanmean(__A)
_a = np.nanmean(__A)
_a = all_acc
_a = iou
_a = acc
if nan_to_num is not None:
_a = {metric: np.nan_to_num(__A , nan=__A) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
'''simple docstring'''
def a__ (self ) -> List[str]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ),
'''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ),
} ) , reference_urls=[
'''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'''
] , )
def a__ (self , A , A , A , A , A = None , A = None , A = False , ) -> List[Any]:
"""simple docstring"""
_a = mean_iou(
results=A , gt_seg_maps=A , num_labels=A , ignore_index=A , nan_to_num=A , label_map=A , reduce_labels=A , )
return iou_result
| 211 | 1 |
'''simple docstring'''
import numpy as np
import datasets
__lowerCAmelCase = '\nCompute the Mahalanobis Distance\n\nMahalanobis distance is the distance between a point and a distribution,\nnot between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
__lowerCAmelCase = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
__lowerCAmelCase = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalanobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
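# A minimal sketch of the computation implemented below, with assumed toy data
# (illustration only): d(x)^2 = (x - mu)^T Sigma^+ (x - mu), where mu and
# Sigma are the mean and covariance of the reference distribution and the
# pseudo-inverse is used when Sigma is singular.
#
#     >>> ref = np.array([[0.0, 1.0], [1.0, 0.0]])
#     >>> x = np.array([[0.0, 1.0]])
#     >>> delta = x - ref.mean(0)                      # [[-0.5, 0.5]]
#     >>> cov = np.cov(ref.T)                          # singular here
#     >>> (delta @ np.linalg.pinv(cov) @ delta.T).diagonal()
#     array([0.5])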
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
def lowercase (self ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""X""": datasets.Sequence(datasets.Value("""float""" , id="""sequence""" ) , id="""X""" ),
} ) , )
def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
# convert to numpy arrays
_snake_case = np.array(UpperCAmelCase )
_snake_case = np.array(UpperCAmelCase )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("""Expected `X` to be a 2D vector""" )
if len(reference_distribution.shape ) != 2:
raise ValueError("""Expected `reference_distribution` to be a 2D vector""" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"""Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension""" )
# Get mahalanobis distance for each prediction
_snake_case = X - np.mean(UpperCAmelCase )
_snake_case = np.cov(reference_distribution.T )
try:
_snake_case = np.linalg.inv(UpperCAmelCase )
except np.linalg.LinAlgError:
_snake_case = np.linalg.pinv(UpperCAmelCase )
_snake_case = np.dot(UpperCAmelCase , UpperCAmelCase )
_snake_case = np.dot(UpperCAmelCase , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist} | 270 |
'''simple docstring'''
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
__lowerCAmelCase = 'bert-base-cased'
__lowerCAmelCase = 'fp16'
__lowerCAmelCase = 'bf16'
__lowerCAmelCase = [FPaa, BFaa]
@require_fsdp
@require_cuda
class _lowerCAmelCase ( __snake_case ):
'''simple docstring'''
def lowercase (self ) -> Optional[Any]:
super().setUp()
_snake_case = dict(
ACCELERATE_USE_FSDP="""true""" , MASTER_ADDR="""localhost""" , MASTER_PORT="""10999""" , RANK="""0""" , LOCAL_RANK="""0""" , WORLD_SIZE="""1""" , )
def lowercase (self ) -> Optional[int]:
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(UpperCAmelCase ):
_snake_case = self.dist_env.copy()
_snake_case = f"""{i + 1}"""
_snake_case = strategy
with mockenv_context(**UpperCAmelCase ):
_snake_case = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def lowercase (self ) -> List[str]:
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(UpperCAmelCase ):
_snake_case = self.dist_env.copy()
_snake_case = prefetch_policy
with mockenv_context(**UpperCAmelCase ):
_snake_case = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def lowercase (self ) -> int:
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(UpperCAmelCase ):
_snake_case = self.dist_env.copy()
_snake_case = state_dict_type
with mockenv_context(**UpperCAmelCase ):
_snake_case = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def lowercase (self ) -> Optional[Any]:
_snake_case = AutoModel.from_pretrained(UpperCAmelCase )
for policy in FSDP_AUTO_WRAP_POLICY:
_snake_case = self.dist_env.copy()
_snake_case = policy
if policy == "TRANSFORMER_BASED_WRAP":
_snake_case = """BertLayer"""
elif policy == "SIZE_BASED_WRAP":
_snake_case = """2000"""
with mockenv_context(**UpperCAmelCase ):
_snake_case = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(UpperCAmelCase )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
_snake_case = self.dist_env.copy()
_snake_case = """TRANSFORMER_BASED_WRAP"""
_snake_case = """T5Layer"""
with mockenv_context(**UpperCAmelCase ):
_snake_case = FullyShardedDataParallelPlugin()
with self.assertRaises(UpperCAmelCase ) as cm:
fsdp_plugin.set_auto_wrap_policy(UpperCAmelCase )
self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception ) )
_snake_case = self.dist_env.copy()
_snake_case = """SIZE_BASED_WRAP"""
_snake_case = """0"""
with mockenv_context(**UpperCAmelCase ):
_snake_case = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(UpperCAmelCase )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def lowercase (self ) -> int:
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
_snake_case = self.dist_env.copy()
_snake_case = mp_dtype
with mockenv_context(**UpperCAmelCase ):
_snake_case = Accelerator()
if mp_dtype == "fp16":
_snake_case = torch.floataa
elif mp_dtype == "bf16":
_snake_case = torch.bfloataa
_snake_case = MixedPrecision(param_dtype=UpperCAmelCase , reduce_dtype=UpperCAmelCase , buffer_dtype=UpperCAmelCase )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , UpperCAmelCase )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , UpperCAmelCase ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(UpperCAmelCase )
def lowercase (self ) -> Optional[int]:
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
_snake_case = self.dist_env.copy()
_snake_case = str(UpperCAmelCase ).lower()
with mockenv_context(**UpperCAmelCase ):
_snake_case = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=UpperCAmelCase ) )
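# A minimal sketch of the pattern exercised by the tests above (the
# environment variable names here are assumptions inferred from the mocked
# settings, not taken from this file):
#
#     >>> with mockenv_context(ACCELERATE_USE_FSDP="true", FSDP_OFFLOAD_PARAMS="true",
#     ...                      MASTER_ADDR="localhost", MASTER_PORT="10999",
#     ...                      RANK="0", LOCAL_RANK="0", WORLD_SIZE="1"):
#     ...     FullyShardedDataParallelPlugin().cpu_offload
#     CPUOffload(offload_params=True)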
@require_fsdp
@require_multi_gpu
@slow
class _lowerCAmelCase ( __snake_case ):
'''simple docstring'''
def lowercase (self ) -> List[Any]:
super().setUp()
_snake_case = 0.82
_snake_case = [
"""fsdp_shard_grad_op_transformer_based_wrap""",
"""fsdp_full_shard_transformer_based_wrap""",
]
_snake_case = {
"""multi_gpu_fp16""": 3200,
"""fsdp_shard_grad_op_transformer_based_wrap_fp16""": 2000,
"""fsdp_full_shard_transformer_based_wrap_fp16""": 1900,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
_snake_case = 160
_snake_case = 160
_snake_case = inspect.getfile(accelerate.test_utils )
_snake_case = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps"""] )
def lowercase (self ) -> Any:
_snake_case = os.path.join(self.test_scripts_folder , """test_performance.py""" )
_snake_case = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""]
for config in self.performance_configs:
_snake_case = cmd.copy()
for i, strategy in enumerate(UpperCAmelCase ):
if strategy.lower() in config:
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
break
if "fp32" in config:
cmd_config.append("""--mixed_precision=no""" )
else:
cmd_config.append("""--mixed_precision=fp16""" )
if "cpu_offload" in config:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
f"""--performance_lower_bound={self.performance_lower_bound}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCAmelCase , env=os.environ.copy() )
def lowercase (self ) -> Union[str, Any]:
_snake_case = os.path.join(self.test_scripts_folder , """test_checkpointing.py""" )
_snake_case = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
"""--use_fsdp""",
"""--mixed_precision=fp16""",
"""--fsdp_transformer_layer_cls_to_wrap=BertLayer""",
]
for i, strategy in enumerate(UpperCAmelCase ):
_snake_case = cmd.copy()
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
if strategy != "FULL_SHARD":
continue
_snake_case = len(UpperCAmelCase )
for state_dict_type in FSDP_STATE_DICT_TYPE:
_snake_case = cmd_config[:state_dict_config_index]
cmd_config.append(f"""--fsdp_state_dict_type={state_dict_type}""" )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
"""--partial_train_epoch=1""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCAmelCase , env=os.environ.copy() )
_snake_case = cmd_config[:-1]
_snake_case = os.path.join(self.tmpdir , """epoch_0""" )
cmd_config.extend(
[
f"""--resume_from_checkpoint={resume_from_checkpoint}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCAmelCase , env=os.environ.copy() )
def lowercase (self ) -> Optional[Any]:
_snake_case = os.path.join(self.test_scripts_folder , """test_peak_memory_usage.py""" )
_snake_case = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
_snake_case = cmd.copy()
if "fp16" in spec:
cmd_config.extend(["""--mixed_precision=fp16"""] )
else:
cmd_config.extend(["""--mixed_precision=no"""] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(["""--use_fsdp"""] )
for i, strategy in enumerate(UpperCAmelCase ):
if strategy.lower() in spec:
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
break
if "cpu_offload" in spec:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
f"""--peak_memory_upper_bound={peak_mem_upper_bound}""",
f"""--n_train={self.n_train}""",
f"""--n_val={self.n_val}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCAmelCase , env=os.environ.copy() ) | 270 | 1 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
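# A hypothetical usage sketch (assumes the public diffusers API; the checkpoint
# name and call arguments are illustrative, not taken from this module):
#
#     >>> pipe = ShapEPipeline.from_pretrained("openai/shap-e")
#     >>> images = pipe("a red chair", guidance_scale=15.0, frame_size=64).images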
 | 174 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase ( a__ ):
'''simple docstring'''
def _lowerCAmelCase( self ) -> List[str]:
lowercase__ : Union[str, Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__lowerCAmelCase , '''width_multiplier''' ) )
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=13 , __lowerCAmelCase=64 , __lowerCAmelCase=2 , __lowerCAmelCase=3 , __lowerCAmelCase="swish" , __lowerCAmelCase=3 , __lowerCAmelCase=32 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=10 , __lowerCAmelCase=None , __lowerCAmelCase=0.2_5 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , ) -> List[Any]:
lowercase__ : List[str] = parent
lowercase__ : List[Any] = batch_size
lowercase__ : List[str] = image_size
lowercase__ : Optional[int] = patch_size
lowercase__ : Tuple = num_channels
lowercase__ : List[str] = make_divisible(512 * width_multiplier , divisor=8 )
lowercase__ : Optional[int] = hidden_act
lowercase__ : List[Any] = conv_kernel_size
lowercase__ : Dict = output_stride
lowercase__ : List[Any] = classifier_dropout_prob
lowercase__ : str = use_labels
lowercase__ : List[Any] = is_training
lowercase__ : Tuple = num_labels
lowercase__ : Optional[int] = initializer_range
lowercase__ : Tuple = scope
lowercase__ : List[Any] = width_multiplier
lowercase__ : Optional[int] = ffn_dropout
lowercase__ : int = attn_dropout
def _lowerCAmelCase( self ) -> Optional[int]:
lowercase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Any = None
lowercase__ : Tuple = None
if self.use_labels:
lowercase__ : str = ids_tensor([self.batch_size] , self.num_labels )
lowercase__ : int = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowercase__ : Dict = self.get_config()
return config, pixel_values, labels, pixel_labels
def _lowerCAmelCase( self ) -> Tuple:
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Any:
lowercase__ : Optional[int] = MobileViTVaModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowercase__ : str = model(__lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
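# With the tester defaults above this amounts to one concrete shape check
# (illustration only): last_hidden_size = make_divisible(512 * 0.25) = 128 and
# image_size // output_stride = 64 // 32 = 2, so last_hidden_state is expected
# to be (batch_size=13, 128, 2, 2).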
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]:
lowercase__ : Optional[Any] = self.num_labels
lowercase__ : Dict = MobileViTVaForImageClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowercase__ : Optional[Any] = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
lowercase__ : str = self.num_labels
lowercase__ : List[Any] = MobileViTVaForSemanticSegmentation(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowercase__ : int = model(__lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
lowercase__ : Union[str, Any] = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _lowerCAmelCase( self ) -> int:
lowercase__ : Union[str, Any] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[str] = config_and_inputs
lowercase__ : List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( a__ , a__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def _lowerCAmelCase( self ) -> int:
lowercase__ : Tuple = MobileViTVaModelTester(self )
lowercase__ : Any = MobileViTVaConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase )
def _lowerCAmelCase( self ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def _lowerCAmelCase( self ) -> str:
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def _lowerCAmelCase( self ) -> Optional[Any]:
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def _lowerCAmelCase( self ) -> Optional[int]:
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def _lowerCAmelCase( self ) -> Any:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _lowerCAmelCase( self ) -> str:
pass
def _lowerCAmelCase( self ) -> Optional[Any]:
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : str = model_class(__lowerCAmelCase )
lowercase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : str = [*signature.parameters.keys()]
lowercase__ : Dict = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def _lowerCAmelCase( self ) -> str:
lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _lowerCAmelCase( self ) -> Union[str, Any]:
def check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
lowercase__ : Optional[int] = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
lowercase__ : List[str] = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
lowercase__ : List[Any] = outputs.hidden_states
lowercase__ : Optional[int] = 5
self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
lowercase__ : str = 2
for i in range(len(__lowerCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
lowercase__ , lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Tuple = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def _lowerCAmelCase( self ) -> List[Any]:
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
def _lowerCAmelCase( self ) -> Dict:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__lowerCAmelCase )
@slow
def _lowerCAmelCase( self ) -> List[str]:
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Dict = MobileViTVaModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def __UpperCamelCase ( ):
lowercase__ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowerCAmelCase( self ) -> int:
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
def _lowerCAmelCase( self ) -> List[Any]:
lowercase__ : Dict = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
__lowerCAmelCase )
lowercase__ : List[Any] = self.default_image_processor
lowercase__ : Optional[int] = prepare_img()
lowercase__ : int = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
lowercase__ : List[str] = model(**__lowerCAmelCase )
# verify the logits
lowercase__ : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
lowercase__ : int = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) )
@slow
def _lowerCAmelCase( self ) -> Optional[int]:
lowercase__ : Dict = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
lowercase__ : int = model.to(__lowerCAmelCase )
lowercase__ : Any = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
lowercase__ : str = prepare_img()
lowercase__ : Optional[int] = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
lowercase__ : str = model(**__lowerCAmelCase )
lowercase__ : Tuple = outputs.logits
# verify the logits
lowercase__ : List[Any] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , __lowerCAmelCase )
lowercase__ : Union[str, Any] = torch.tensor(
[
[[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]],
[[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]],
[[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]],
] , device=__lowerCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __lowerCAmelCase , atol=1E-4 ) )
@slow
def _lowerCAmelCase( self ) -> Any:
lowercase__ : Optional[int] = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
lowercase__ : List[str] = model.to(__lowerCAmelCase )
lowercase__ : Optional[int] = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
lowercase__ : int = prepare_img()
lowercase__ : List[str] = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
lowercase__ : Optional[Any] = model(**__lowerCAmelCase )
lowercase__ : Optional[int] = outputs.logits.detach().cpu()
lowercase__ : Any = image_processor.post_process_semantic_segmentation(outputs=__lowerCAmelCase , target_sizes=[(50, 60)] )
lowercase__ : Optional[int] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , __lowerCAmelCase )
lowercase__ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=__lowerCAmelCase )
lowercase__ : Union[str, Any] = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , __lowerCAmelCase )
| 198 | 0 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def __UpperCAmelCase ( __a : List[str] ) -> str:
"""simple docstring"""
return getitem, k
def __UpperCAmelCase ( __a : int ,__a : List[Any] ) -> Optional[int]:
"""simple docstring"""
return setitem, k, v
def __UpperCAmelCase ( __a : Any ) -> Optional[Any]:
"""simple docstring"""
return delitem, k
def __UpperCAmelCase ( __a : Any ,__a : List[str] ,*__a : Any ) -> Dict:
"""simple docstring"""
try:
return fun(__a ,*__a ), None
except Exception as e:
return None, e
a__ = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
a__ = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
a__ = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
a__ = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
a__ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
a__ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
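# A minimal sketch of what the helpers above expand to (illustration only):
#
#     >>> _set("key_a", "val_a")
#     (<built-in function setitem>, 'key_a', 'val_a')
#     >>> _get("key_a")
#     (<built-in function getitem>, 'key_a')
#
# Each (fun, *args) tuple is later applied to both HashMap and a plain dict so
# the two behaviours can be compared operation by operation.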
@pytest.mark.parametrize(
'''operations''' ,(
pytest.param(_add_items ,id='''add items''' ),
pytest.param(_overwrite_items ,id='''overwrite items''' ),
pytest.param(_delete_items ,id='''delete items''' ),
pytest.param(_access_absent_items ,id='''access absent items''' ),
pytest.param(_add_with_resize_up ,id='''add with resize up''' ),
pytest.param(_add_with_resize_down ,id='''add with resize down''' ),
) ,)
def __UpperCAmelCase ( __a : int ) -> int:
"""simple docstring"""
_a : str = HashMap(initial_block_size=4 )
_a : Union[str, Any] = {}
for _, (fun, *args) in enumerate(__a ):
_a , _a : List[Any] = _run_operation(__a ,__a ,*__a )
_a , _a : List[Any] = _run_operation(__a ,__a ,*__a )
assert my_res == py_res
assert str(__a ) == str(__a )
assert set(__a ) == set(__a )
assert len(__a ) == len(__a )
assert set(my.items() ) == set(py.items() )
def __UpperCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
def is_public(__a : str ) -> bool:
return not name.startswith('''_''' )
_a : List[Any] = {name for name in dir({} ) if is_public(__a )}
_a : Tuple = {name for name in dir(HashMap() ) if is_public(__a )}
assert dict_public_names > hash_public_names
| 15 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
a__ = logging.get_logger(__name__)
def __UpperCAmelCase ( __a : Union[str, Any] ,__a : str ,__a : Union[str, Any] ) -> List[str]:
"""simple docstring"""
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
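# A minimal worked example of the normalization above (assumed values, for
# illustration): boxes are rescaled into a 0-1000 coordinate system, so on a
# 200x100 (width x height) image the box [10, 20, 30, 40] becomes
# [1000 * 10 // 200, 1000 * 20 // 100, 1000 * 30 // 200, 1000 * 40 // 100],
# i.e. [50, 200, 150, 400].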
def __UpperCAmelCase ( __a : np.ndarray ,__a : Optional[str] ,__a : Optional[str] ) -> List[Any]:
"""simple docstring"""
_a : str = to_pil_image(__a )
_a , _a : Optional[Any] = pil_image.size
_a : Tuple = pytesseract.image_to_data(__a ,lang=__a ,output_type='''dict''' ,config=__a )
_a , _a , _a , _a , _a : List[str] = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
_a : Dict = [idx for idx, word in enumerate(__a ) if not word.strip()]
_a : str = [word for idx, word in enumerate(__a ) if idx not in irrelevant_indices]
_a : List[str] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
_a : Union[str, Any] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
_a : str = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
_a : Union[str, Any] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
_a : int = []
for x, y, w, h in zip(__a ,__a ,__a ,__a ):
_a : List[str] = [x, y, x + w, y + h]
actual_boxes.append(__a )
# finally, normalize the bounding boxes
_a : Dict = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(__a ,__a ,__a ) )
assert len(__a ) == len(__a ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = ["pixel_values"]
def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = True , _a = 1 / 2_5_5 , _a = True , _a = None , _a = None , _a = True , _a = None , _a = "" , **_a , ) -> None:
super().__init__(**_a )
_a : List[str] = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
_a : Union[str, Any] = get_size_dict(_a )
_a : int = do_resize
_a : Optional[int] = size
_a : str = resample
_a : str = do_rescale
_a : Any = rescale_value
_a : Optional[Any] = do_normalize
_a : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_a : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD
_a : List[Any] = apply_ocr
_a : Optional[int] = ocr_lang
_a : Tuple = tesseract_config
def __lowercase ( self , _a , _a , _a = PILImageResampling.BILINEAR , _a = None , **_a , ) -> np.ndarray:
_a : Any = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_a : Optional[int] = (size['''height'''], size['''width'''])
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a = None , **_a , ) -> np.ndarray:
return rescale(_a , scale=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray:
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a = None , _a = None , _a=None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> PIL.Image.Image:
_a : Optional[int] = do_resize if do_resize is not None else self.do_resize
_a : Union[str, Any] = size if size is not None else self.size
_a : Any = get_size_dict(_a )
_a : List[str] = resample if resample is not None else self.resample
_a : int = do_rescale if do_rescale is not None else self.do_rescale
_a : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_a : int = do_normalize if do_normalize is not None else self.do_normalize
_a : str = image_mean if image_mean is not None else self.image_mean
_a : Tuple = image_std if image_std is not None else self.image_std
_a : Any = apply_ocr if apply_ocr is not None else self.apply_ocr
_a : int = ocr_lang if ocr_lang is not None else self.ocr_lang
_a : Optional[int] = tesseract_config if tesseract_config is not None else self.tesseract_config
_a : List[Any] = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
# All transformations expect numpy arrays.
_a : Any = [to_numpy_array(_a ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''' )
_a : str = []
_a : str = []
for image in images:
_a , _a : Union[str, Any] = apply_tesseract(_a , _a , _a )
words_batch.append(_a )
boxes_batch.append(_a )
if do_resize:
_a : List[str] = [self.resize(image=_a , size=_a , resample=_a ) for image in images]
if do_rescale:
_a : Optional[Any] = [self.rescale(image=_a , scale=_a ) for image in images]
if do_normalize:
_a : List[Any] = [self.normalize(image=_a , mean=_a , std=_a ) for image in images]
_a : List[str] = [to_channel_dimension_format(_a , _a ) for image in images]
_a : List[str] = BatchFeature(data={'''pixel_values''': images} , tensor_type=_a )
if apply_ocr:
_a : Optional[int] = words_batch
_a : List[Any] = boxes_batch
return data
| 15 | 1 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Tuple = '''autoformer'''
_snake_case : Tuple = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = "student_t" , _UpperCamelCase = "nll" , _UpperCamelCase = 1 , _UpperCamelCase = [1, 2, 3, 4, 5, 6, 7] , _UpperCamelCase = True , _UpperCamelCase = 0 , _UpperCamelCase = 0 , _UpperCamelCase = 0 , _UpperCamelCase = 0 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 6_4 , _UpperCamelCase = 2 , _UpperCamelCase = 2 , _UpperCamelCase = 2 , _UpperCamelCase = 2 , _UpperCamelCase = 3_2 , _UpperCamelCase = 3_2 , _UpperCamelCase = "gelu" , _UpperCamelCase = 0.1 , _UpperCamelCase = 0.1 , _UpperCamelCase = 0.1 , _UpperCamelCase = 0.1 , _UpperCamelCase = 0.1 , _UpperCamelCase = 1_0_0 , _UpperCamelCase = 0.02 , _UpperCamelCase = True , _UpperCamelCase=True , _UpperCamelCase = 1_0 , _UpperCamelCase = 2_5 , _UpperCamelCase = 3 , **_UpperCamelCase , ) -> Union[str, Any]:
# time series specific configuration
UpperCAmelCase_ : str = prediction_length
UpperCAmelCase_ : str = context_length if context_length is not None else prediction_length
UpperCAmelCase_ : Union[str, Any] = distribution_output
UpperCAmelCase_ : List[Any] = loss
UpperCAmelCase_ : Any = input_size
UpperCAmelCase_ : List[str] = num_time_features
UpperCAmelCase_ : Optional[Any] = lags_sequence
UpperCAmelCase_ : Optional[int] = scaling
UpperCAmelCase_ : Union[str, Any] = num_dynamic_real_features
UpperCAmelCase_ : Union[str, Any] = num_static_real_features
UpperCAmelCase_ : int = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(_UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
UpperCAmelCase_ : Union[str, Any] = cardinality
else:
UpperCAmelCase_ : int = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(_UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
UpperCAmelCase_ : Tuple = embedding_dimension
else:
UpperCAmelCase_ : List[str] = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
UpperCAmelCase_ : Optional[Any] = num_parallel_samples
# Transformer architecture configuration
UpperCAmelCase_ : List[str] = input_size * len(self.lags_sequence ) + self._number_of_features
UpperCAmelCase_ : List[Any] = d_model
UpperCAmelCase_ : List[str] = encoder_attention_heads
UpperCAmelCase_ : List[Any] = decoder_attention_heads
UpperCAmelCase_ : Optional[int] = encoder_ffn_dim
UpperCAmelCase_ : Dict = decoder_ffn_dim
UpperCAmelCase_ : str = encoder_layers
UpperCAmelCase_ : Union[str, Any] = decoder_layers
UpperCAmelCase_ : Union[str, Any] = dropout
UpperCAmelCase_ : int = attention_dropout
UpperCAmelCase_ : Optional[Any] = activation_dropout
UpperCAmelCase_ : List[Any] = encoder_layerdrop
UpperCAmelCase_ : Union[str, Any] = decoder_layerdrop
UpperCAmelCase_ : Union[str, Any] = activation_function
UpperCAmelCase_ : Union[str, Any] = init_std
UpperCAmelCase_ : Optional[int] = use_cache
# Autoformer
UpperCAmelCase_ : int = label_length
UpperCAmelCase_ : List[Any] = moving_average
UpperCAmelCase_ : Optional[int] = autocorrelation_factor
super().__init__(is_encoder_decoder=_UpperCamelCase , **_UpperCamelCase )
@property
def __UpperCAmelCase ( self ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
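# A minimal worked example of the derived sizes above (all-default config,
# illustration only): cardinality falls back to [0], so embedding_dimension
# becomes [min(50, 1 // 2)] == [0] and _number_of_features is
# 0 + 0 + 0 + 0 + input_size * 2 == 2; with the default lags_sequence of
# length 7, feature_size is input_size * 7 + 2 == 9.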
| 29 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'vocab.txt'}
__UpperCAmelCase = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__UpperCAmelCase = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__UpperCAmelCase = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = VOCAB_FILES_NAMES
_snake_case : int = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Dict = PRETRAINED_INIT_CONFIGURATION
_snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Any = ConvBertTokenizer
def __init__( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase="[UNK]" , _UpperCamelCase="[SEP]" , _UpperCamelCase="[PAD]" , _UpperCamelCase="[CLS]" , _UpperCamelCase="[MASK]" , _UpperCamelCase=True , _UpperCamelCase=None , **_UpperCamelCase , ) -> Dict:
super().__init__(
_UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , )
UpperCAmelCase_ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , _UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _UpperCamelCase ) != tokenize_chinese_chars
):
UpperCAmelCase_ : Any = getattr(_UpperCamelCase , normalizer_state.pop('type' ) )
UpperCAmelCase_ : str = do_lower_case
UpperCAmelCase_ : List[Any] = strip_accents
UpperCAmelCase_ : str = tokenize_chinese_chars
UpperCAmelCase_ : Tuple = normalizer_class(**_UpperCamelCase )
UpperCAmelCase_ : Any = do_lower_case
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=None ) -> List[str]:
UpperCAmelCase_ : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]:
UpperCAmelCase_ : Union[str, Any] = [self.sep_token_id]
UpperCAmelCase_ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
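# A minimal sketch of the two helpers above (illustration only): a sequence
# pair is laid out as "[CLS] A [SEP] B [SEP]", with token type id 0 for
# "[CLS] A [SEP]" and 1 for "B [SEP]"; e.g. len(A) == 2 and len(B) == 3
# yields [0, 0, 0, 0, 1, 1, 1, 1].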
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> Tuple[str]:
UpperCAmelCase_ : Any = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase )
return tuple(_UpperCamelCase )
| 29 | 1 |
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    # discriminant; cmath.sqrt handles negative values by returning complex roots
    delta = b * b - 4 * a * c
    root_a = (-b + sqrt(delta)) / (2 * a)
    root_b = (-b - sqrt(delta)) / (2 * a)
    return (
        root_a.real if not root_a.imag else root_a,
        root_b.real if not root_b.imag else root_b,
    )
def main() -> None:
    solution_a, solution_b = quadratic_roots(a=5, b=6, c=1)
    print(f'''The solutions are: {solution_a} and {solution_b}''')
if __name__ == "__main__":
main()
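# A quick check of the example above (illustration only): for 5x^2 + 6x + 1
# the discriminant is 36 - 20 = 16, so the roots are (-6 +/- 4) / 10, i.e.
# -0.2 and -1.0. A negative discriminant yields complex roots:
#
#     >>> quadratic_roots(1, 0, 1)
#     (1j, -1j)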
| 180 |
lowercase__ : Optional[int] = [
"DownloadConfig",
"DownloadManager",
"DownloadMode",
"StreamingDownloadManager",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 180 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case : Optional[int] = logging.get_logger(__name__)
snake_case : Any = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class _snake_case ( _snake_case , _snake_case ):
SCREAMING_SNAKE_CASE__ = 'swin'
SCREAMING_SNAKE_CASE__ = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , _lowerCamelCase=224 , _lowerCamelCase=4 , _lowerCamelCase=3 , _lowerCamelCase=96 , _lowerCamelCase=[2, 2, 6, 2] , _lowerCamelCase=[3, 6, 12, 24] , _lowerCamelCase=7 , _lowerCamelCase=4.0 , _lowerCamelCase=True , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.1 , _lowerCamelCase="gelu" , _lowerCamelCase=False , _lowerCamelCase=0.02 , _lowerCamelCase=1e-5 , _lowerCamelCase=32 , _lowerCamelCase=None , _lowerCamelCase=None , **_lowerCamelCase , ):
super().__init__(**_lowerCamelCase )
a :Optional[Any] = image_size
a :List[Any] = patch_size
a :Any = num_channels
a :str = embed_dim
a :str = depths
a :Optional[Any] = len(_lowerCamelCase )
a :int = num_heads
a :str = window_size
a :List[Any] = mlp_ratio
a :Union[str, Any] = qkv_bias
a :Optional[Any] = hidden_dropout_prob
a :List[Any] = attention_probs_dropout_prob
a :Any = drop_path_rate
a :Dict = hidden_act
a :Union[str, Any] = use_absolute_embeddings
a :Tuple = layer_norm_eps
a :Optional[Any] = initializer_range
a :List[str] = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
a :List[str] = int(embed_dim * 2 ** (len(_lowerCamelCase ) - 1) )
a :List[Any] = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(_lowerCamelCase ) + 1 )]
a , a :Dict = get_aligned_output_features_output_indices(
out_features=_lowerCamelCase , out_indices=_lowerCamelCase , stage_names=self.stage_names )
class _snake_case ( _snake_case ):
SCREAMING_SNAKE_CASE__ = version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return 1e-4
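# A minimal worked example of the derived attributes above (default config,
# illustration only): embed_dim = 96 and len(depths) = 4, so the channel
# dimension after the last stage is hidden_size = int(96 * 2 ** 3) == 768 and
# stage_names is ["stem", "stage1", "stage2", "stage3", "stage4"].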
| 94 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
snake_case : Union[str, Any] = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def __lowerCamelCase ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int]=None ):
"""simple docstring"""
require_version(deps[pkg] , UpperCAmelCase_ )
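# A hypothetical usage sketch (the public name and error behaviour are
# assumptions, not defined in this file): each entry in `deps` is a pip-style
# requirement such as "tqdm>=4.27", and the check raises if the installed
# version does not satisfy it.
#
#     >>> dep_version_check("tqdm")  # no-op when the requirement is met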
| 94 | 1 |
"""simple docstring"""
import argparse
import os
import re
UpperCAmelCase : Union[str, Any] = "src/transformers"
# Pattern that looks at the indentation in a line.
UpperCAmelCase : Any = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
UpperCAmelCase : int = re.compile(r"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
UpperCAmelCase : List[str] = re.compile(r"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
UpperCAmelCase : Union[str, Any] = re.compile(r"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
UpperCAmelCase : List[str] = re.compile(r"\[([^\]]+)\]")
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ = _re_indent.search(__lowerCAmelCase )
return "" if search is None else search.groups()[0]
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase="" , __lowerCAmelCase=None , __lowerCAmelCase=None ) -> List[Any]:
'''simple docstring'''
lowercase_ = 0
lowercase_ = code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(__lowerCAmelCase ):
index += 1
lowercase_ = ["""\n""".join(lines[:index] )]
else:
lowercase_ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowercase_ = [lines[index]]
index += 1
while index < len(__lowerCAmelCase ) and (end_prompt is None or not lines[index].startswith(__lowerCAmelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(__lowerCAmelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(__lowerCAmelCase ) )
if index < len(__lowerCAmelCase ) - 1:
lowercase_ = [lines[index + 1]]
index += 1
else:
lowercase_ = []
else:
blocks.append("""\n""".join(__lowerCAmelCase ) )
lowercase_ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(__lowerCAmelCase ) > 0:
blocks.append("""\n""".join(__lowerCAmelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(__lowerCAmelCase ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
def _inner(__lowerCAmelCase ):
return key(__lowerCAmelCase ).lower().replace("""_""" , """""" )
return _inner
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase=None ) -> Optional[Any]:
'''simple docstring'''
def noop(__lowerCAmelCase ):
return x
if key is None:
lowercase_ = noop
# Constants are all uppercase, they go first.
lowercase_ = [obj for obj in objects if key(__lowerCAmelCase ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowercase_ = [obj for obj in objects if key(__lowerCAmelCase )[0].isupper() and not key(__lowerCAmelCase ).isupper()]
# Functions begin with a lowercase, they go last.
lowercase_ = [obj for obj in objects if not key(__lowerCAmelCase )[0].isupper()]
lowercase_ = ignore_underscore(__lowerCAmelCase )
return sorted(__lowerCAmelCase , key=__lowerCAmelCase ) + sorted(__lowerCAmelCase , key=__lowerCAmelCase ) + sorted(__lowerCAmelCase , key=__lowerCAmelCase )
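# A minimal sketch of the ordering above (illustration only): constants first,
# then classes, then functions, each group sorted case-insensitively while
# ignoring underscores.
#
#     >>> sort_objects(["b_func", "AClass", "SOME_CONST", "a_func"])
#     ['SOME_CONST', 'AClass', 'a_func', 'b_func']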
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
def _replace(__lowerCAmelCase ):
lowercase_ = match.groups()[0]
if "," not in imports:
return F'''[{imports}]'''
lowercase_ = [part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase_ = keys[:-1]
return "[" + ", ".join([F'''"{k}"''' for k in sort_objects(__lowerCAmelCase )] ) + "]"
lowercase_ = import_statement.split("""\n""" )
if len(__lowerCAmelCase ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowercase_ = 2 if lines[1].strip() == """[""" else 1
lowercase_ = [(i, _re_strip_line.search(__lowerCAmelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowercase_ = sort_objects(__lowerCAmelCase , key=lambda __lowerCAmelCase : x[1] )
lowercase_ = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(__lowerCAmelCase ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowercase_ = _re_bracket_content.sub(_replace , lines[1] )
else:
lowercase_ = [part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase_ = keys[:-1]
lowercase_ = get_indent(lines[1] ) + """, """.join([F'''"{k}"''' for k in sort_objects(__lowerCAmelCase )] )
return "\n".join(__lowerCAmelCase )
else:
# Finally we have to deal with imports fitting on one line
lowercase_ = _re_bracket_content.sub(_replace , __lowerCAmelCase )
return import_statement
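# A minimal sketch of the single-line case above (illustration only):
#
#     >>> sort_objects_in_import('_import_structure["models"] = ["b_fn", "AClass"]')
#     '_import_structure["models"] = ["AClass", "b_fn"]'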
def sort_imports(file, check_only=True):
    """Sort the imports defined in the `_import_structure` of a given init file."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every `__init__.py` found under the source tree."""
    failures = []
    for root, _, files in os.walk("src/transformers"):  # the scanned path is an assumption; the original keeps it in a module-level constant
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
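# Illustrative example (not part of the original script): `sort_objects` puts
# constants first, classes second and functions last, sorting each group while
# ignoring leading underscores, e.g.
#
#     sort_objects(["load_model", "CONFIG_NAME", "BertModel", "_helper"])
#     # -> ["CONFIG_NAME", "BertModel", "_helper", "load_model"]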
| 370 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
| 313 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/deit-base-distilled-patch16-224": (
"https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
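# Hedged usage sketch: the ONNX config declares a single image input with all
# four axes dynamic.
#
#     onnx_config = DeiTOnnxConfig(DeiTConfig())
#     list(onnx_config.inputs)         # -> ["pixel_values"]
#     onnx_config.atol_for_validation  # -> 1e-4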
| 270 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
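# Worked example (illustrative, not part of the original script): for the gold
# answer "the cat sat" and prediction "cat sat down", normalization drops the
# article "the", the token overlap is {"cat", "sat"}, so precision = 2/3,
# recall = 1.0 and compute_f1(...) == 0.8.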
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
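# Typical invocation (illustrative; the script filename is an assumption):
#
#     python evaluate_squad_v2.py dev-v2.0.json predictions.json \
#         --na-prob-file na_probs.json --out-file eval.json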
| 270 | 1 |
# Dependency table: package name -> pinned requirement specifier.
deps = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
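# Hedged sketch of how such a table is typically consumed in setup.py (the helper
# name `deps_list` mirrors the pattern used in HuggingFace repos but is an
# assumption here, not part of this file):
#
#     def deps_list(*pkgs):
#         return [deps[pkg] for pkg in pkgs]
#
#     install_requires = deps_list("importlib_metadata", "numpy", "Pillow")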
| 350 |
""" CvT model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
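# Hedged usage sketch: every per-stage hyperparameter is a 3-element list, one
# entry per CvT stage.
#
#     config = CvtConfig(depth=[1, 4, 16])
#     config.embed_dim  # -> [64, 192, 384]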
| 61 | 0 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]
@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
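# Design note: the suite uses the builtin dict as an oracle -- every operation is
# replayed against both containers and the observable state (result, repr, keys,
# length, items) is compared after each step, so any behavioural divergence in
# HashMap, including raised exceptions, fails the test.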
| 15 |
def longest_distance(graph):
    """Print the number of vertices on a longest path in the given DAG, using Kahn's topological sort."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1

            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1

            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
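# Worked example (illustrative): with the adjacency list above, one longest path
# is 0 -> 3 -> 5 -> 6 -> 7, which visits 5 vertices, so the script prints 5.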
| 15 | 1 |
"""Audio/Text processor class for CLAP."""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ClapProcessor(ProcessorMixin):
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
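# Hedged usage sketch ("laion/clap-htsat-unfused" is assumed to be an available
# checkpoint on the Hub, and `waveform` a 1-D numpy array of audio samples):
#
#     processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#     inputs = processor(text=["a dog barking"], audios=waveform,
#                        sampling_rate=48_000, return_tensors="pt")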
| 142 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"

SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility

MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
| 142 | 1 |
from __future__ import annotations
def minimum_cost_path(matrix: list[list[int]]) -> int:
    """
    Find the minimum cost of a path from the top-left to the bottom-right of a
    matrix, moving only right or down.

    >>> minimum_cost_path([[1, 2], [3, 4]])
    7
    """
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 180 | import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" BERT tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
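# Hedged usage sketch: the fast tokenizer wraps encoded text in [CLS]/[SEP].
#
#     tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
#     ids = tok("hello world")["input_ids"]
#     assert ids[0] == tok.cls_token_id and ids[-1] == tok.sep_token_id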
| 180 | 1 |
def reverse_long_words(sentence: str) -> str:
    """
    Reverse all words that are longer than 4 characters in a sentence.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
| 138 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])
    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
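# The suite above can be run with pytest (the path is an assumption based on the
# usual transformers test layout):
#
#     pytest tests/models/owlvit/test_processor_owlvit.py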
| 138 | 1 |
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
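# Sanity checks (illustrative): F(12) = 144 is the first three-digit Fibonacci
# number, so solution(3) == 12; the Project Euler 25 answer is solution(1000) == 4782.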
| 223 |
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}


class DeformableDetrConfig(PretrainedConfig):
    """Configuration class for a Deformable DETR model."""

    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
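
# Usage sketch (my addition) of the constraint enforced in __init__: two-stage mode
# requires box refinement; the attribute_map properties resolve to the underlying fields.
if __name__ == "__main__":
    config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
    print(config.num_attention_heads, config.hidden_size)  # 8 256
    try:
        DeformableDetrConfig(two_stage=True, with_box_refine=False)
    except ValueError as err:
        print(err)  # If two_stage is True, with_box_refine must be True.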
| 313 | 0 |
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split `x` into sentences with nltk and rejoin them newline-separated (needed for rougeLsum)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
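
# Quick usage check (my addition); sentence boundaries are whatever nltk's punkt model decides.
if __name__ == "__main__":
    text = "ROUGE-Lsum expects one sentence per line. This helper produces that layout."
    print(add_newline_to_end_of_each_sentence(text))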
| 350 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class __snake_case ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = """sew-d"""
def __init__( self , __lowerCamelCase=32 , __lowerCamelCase=768 , __lowerCamelCase=12 , __lowerCamelCase=12 , __lowerCamelCase=3072 , __lowerCamelCase=2 , __lowerCamelCase=512 , __lowerCamelCase=256 , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=("p2c", "c2p") , __lowerCamelCase="layer_norm" , __lowerCamelCase="gelu_python" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=0.0 , __lowerCamelCase=0.1 , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-7 , __lowerCamelCase=1e-5 , __lowerCamelCase="group" , __lowerCamelCase="gelu" , __lowerCamelCase=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , __lowerCamelCase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __lowerCamelCase=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __lowerCamelCase=False , __lowerCamelCase=128 , __lowerCamelCase=16 , __lowerCamelCase=True , __lowerCamelCase=0.0_5 , __lowerCamelCase=10 , __lowerCamelCase=2 , __lowerCamelCase=0.0 , __lowerCamelCase=10 , __lowerCamelCase=0 , __lowerCamelCase="mean" , __lowerCamelCase=False , __lowerCamelCase=False , __lowerCamelCase=256 , __lowerCamelCase=0 , __lowerCamelCase=1 , __lowerCamelCase=2 , **__lowerCamelCase , ):
'''simple docstring'''
super().__init__(**__lowerCamelCase , pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase )
__A : str = hidden_size
__A : List[Any] = feat_extract_norm
__A : Tuple = feat_extract_activation
__A : Dict = list(__lowerCamelCase )
__A : int = list(__lowerCamelCase )
__A : List[Any] = list(__lowerCamelCase )
__A : Any = conv_bias
__A : List[Any] = num_conv_pos_embeddings
__A : Any = num_conv_pos_embedding_groups
__A : Optional[Any] = len(self.conv_dim )
__A : int = num_hidden_layers
__A : Union[str, Any] = intermediate_size
__A : Union[str, Any] = squeeze_factor
__A : int = max_position_embeddings
__A : Tuple = position_buckets
__A : Tuple = share_att_key
__A : List[str] = relative_attention
__A : Optional[Any] = norm_rel_ebd
__A : Dict = list(__lowerCamelCase )
__A : str = hidden_act
__A : List[str] = num_attention_heads
__A : Union[str, Any] = hidden_dropout
__A : Optional[int] = attention_dropout
__A : Optional[Any] = activation_dropout
__A : List[str] = feat_proj_dropout
__A : str = final_dropout
__A : Tuple = layer_norm_eps
__A : int = feature_layer_norm_eps
__A : Optional[int] = initializer_range
__A : Dict = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect.'''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
F"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
F"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__A : int = apply_spec_augment
__A : Any = mask_time_prob
__A : Optional[int] = mask_time_length
__A : Any = mask_time_min_masks
__A : int = mask_feature_prob
__A : Tuple = mask_feature_length
__A : Dict = mask_feature_min_masks
# ctc loss
__A : Tuple = ctc_loss_reduction
__A : Union[str, Any] = ctc_zero_infinity
# sequence classification
__A : Tuple = use_weighted_layer_sum
__A : List[str] = classifier_proj_size
@property
def UpperCamelCase__( self ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
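
# For intuition (my addition): inputs_to_logits_ratio with the default strides is a plain
# product, i.e. one output frame per 320 input samples.
if __name__ == "__main__":
    default_conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
    print(functools.reduce(operator.mul, default_conv_stride, 1))  # 320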
| 291 | 0 |
import os
from datetime import datetime as dt

from github import Github

LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def main() -> None:
    _snake_case = None  # placeholder removed; real bindings follow
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/diffusers')
    open_issues = repo.get_issues(state='open')

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
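
# Readability sketch (my refactor, not used above): the exemption test appears twice in
# main() and could be factored into a predicate like this.
def is_exempt(issue) -> bool:
    # True if any label on the issue is on the exempt list.
    return any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())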
| 42 |
"""simple docstring"""
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.layers.{i}.downsample.reduction.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3, 0, -1 ), range(0, 3 ) ):
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", f"""mask_embedder.{i}.0.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", f"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[: hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[: hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")

    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="maskformer-swin-tiny-ade",
        type=str,
        help="Name of the MaskFormer model you'd like to convert",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
        type=str,
        help="Path to the original state dict (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_maskformer_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
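
# Programmatic use (my addition; the checkpoint path is a placeholder): the entry point
# can also be called directly instead of going through argparse.
def convert_default_tiny_ade_checkpoint(checkpoint_path="/path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl"):
    convert_maskformer_checkpoint(
        model_name="maskformer-swin-tiny-ade",
        checkpoint_path=checkpoint_path,  # placeholder path
        pytorch_dump_folder_path="./maskformer-swin-tiny-ade",
        push_to_hub=False,
    )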
| 61 | 0 |
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=1,
            new_decoder_architecture=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_alibi(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)

    def test_falcon_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_falcon_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_falcon_cache_conversion(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict["input_ids"]
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, use_cache=True)

        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size)
        for layer in range(len(rw_cache)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx])
                )

    def test_falcon_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_past_key_values_format(self):
        # Falcon can have different numbers of KV-heads than the number of query heads, so we need
        # to override this test to use the right head counts.
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

            # If it doesn't support cache, pass the test
            if not hasattr(config, "use_cache"):
                return

            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs["use_cache"] = True
            outputs = model(**inputs)

            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return

            num_hidden_layers = (
                getattr(config, "decoder_layers", None)
                or getattr(config, "num_decoder_layers", None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads)
            embed_dim = getattr(config, "d_model", config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads

            past_kv = outputs["past_key_values"]
            self.assertEqual(len(past_kv), num_hidden_layers)

            batch_size, seq_length = inputs["input_ids"].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]), 2)  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
                self.assertEqual(
                    past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
        model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
        model.eval()
        model.to(torch_device)
        inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

        EXPECTED_OUTPUT = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )

        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]

        self.assertEqual(output_str, EXPECTED_OUTPUT)

    @slow
    def test_lm_generation_big_models(self):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)

    @slow
    def test_lm_generation_use_cache(self):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
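
# Standalone sketch (my addition) of the KV-head rule that test_past_key_values_format
# checks above; the config attribute names follow the test's usage of FalconConfig.
def falcon_expected_kv_heads(config) -> int:
    # New decoder architecture: the cache layout keeps one KV group per attention head.
    if config.new_decoder_architecture:
        return config.num_attention_heads
    # Multi-query attention: a single shared KV head.
    if config.multi_query:
        return 1
    return getattr(config, "num_kv_heads", config.num_attention_heads)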
| 361 |
def multiplicative_persistence(num: int) -> int:
    """Count how many times the digits of `num` must be multiplied together to reach a single digit."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Count how many times the digits of `num` must be summed to reach a single digit."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)
        steps += 1
    return steps


if __name__ == "__main__":
    import doctest

    doctest.testmod()
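    # Quick worked checks (my addition): 217 -> 14 -> 4 is two multiplicative steps;
    # 199 -> 19 -> 10 -> 1 is three additive steps.
    print(multiplicative_persistence(217))  # 2
    print(additive_persistence(199))  # 3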
| 218 | 0 |
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer


if TYPE_CHECKING:
    from ...tokenization_utils_base import TextInput
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    }
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}

SPIECE_UNDERLINE = "▁"


class T5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        """Do not add eos again if the user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs) -> List[str]:
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 142 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : Dict , A : int , A : int , A : int , A : Union[str, Any]=0.0 , A : Optional[int] = None , A : str = "geglu" , A : Optional[int] = None , A : bool = False , A : bool = False , A : bool = False , A : bool = False , A : bool = True , A : str = "layer_norm" , A : bool = False , ) ->Any:
super().__init__()
lowerCamelCase__ : int = only_cross_attention
lowerCamelCase__ : Optional[Any] = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm_zero'''
lowerCamelCase__ : Union[str, Any] = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm'''
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
F" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}." )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
lowerCamelCase__ : Optional[Any] = AdaLayerNorm(A , A )
elif self.use_ada_layer_norm_zero:
lowerCamelCase__ : int = AdaLayerNormZero(A , A )
else:
lowerCamelCase__ : Dict = nn.LayerNorm(A , elementwise_affine=A )
lowerCamelCase__ : Any = Attention(
query_dim=A , heads=A , dim_head=A , dropout=A , bias=A , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=A , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
lowerCamelCase__ : Tuple = (
AdaLayerNorm(A , A )
if self.use_ada_layer_norm
else nn.LayerNorm(A , elementwise_affine=A )
)
lowerCamelCase__ : int = Attention(
query_dim=A , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=A , dim_head=A , dropout=A , bias=A , upcast_attention=A , ) # is self-attn if encoder_hidden_states is none
else:
lowerCamelCase__ : Dict = None
lowerCamelCase__ : Tuple = None
# 3. Feed-forward
lowerCamelCase__ : Optional[int] = nn.LayerNorm(A , elementwise_affine=A )
lowerCamelCase__ : Union[str, Any] = FeedForward(A , dropout=A , activation_fn=A , final_dropout=A )
# let chunk size default to None
lowerCamelCase__ : str = None
lowerCamelCase__ : Tuple = 0
def __lowerCamelCase ( self : Any , A : Optional[int] , A : int ) ->List[str]:
# Sets chunk feed-forward
lowerCamelCase__ : List[Any] = chunk_size
lowerCamelCase__ : List[str] = dim
def __lowerCamelCase ( self : str , A : torch.FloatTensor , A : Optional[torch.FloatTensor] = None , A : Optional[torch.FloatTensor] = None , A : Optional[torch.FloatTensor] = None , A : Optional[torch.LongTensor] = None , A : Dict[str, Any] = None , A : Optional[torch.LongTensor] = None , ) ->Tuple:
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
lowerCamelCase__ : Union[str, Any] = self.norma(A , A )
elif self.use_ada_layer_norm_zero:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any = self.norma(
A , A , A , hidden_dtype=hidden_states.dtype )
else:
lowerCamelCase__ : List[str] = self.norma(A )
lowerCamelCase__ : str = cross_attention_kwargs if cross_attention_kwargs is not None else {}
lowerCamelCase__ : Any = self.attna(
A , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=A , **A , )
if self.use_ada_layer_norm_zero:
lowerCamelCase__ : Any = gate_msa.unsqueeze(1 ) * attn_output
lowerCamelCase__ : Optional[int] = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
lowerCamelCase__ : int = (
self.norma(A , A ) if self.use_ada_layer_norm else self.norma(A )
)
lowerCamelCase__ : int = self.attna(
A , encoder_hidden_states=A , attention_mask=A , **A , )
lowerCamelCase__ : Any = attn_output + hidden_states
# 3. Feed-forward
lowerCamelCase__ : Union[str, Any] = self.norma(A )
if self.use_ada_layer_norm_zero:
lowerCamelCase__ : Optional[int] = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`." )
lowerCamelCase__ : Optional[int] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
lowerCamelCase__ : Optional[int] = torch.cat(
[self.ff(A ) for hid_slice in norm_hidden_states.chunk(A , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
lowerCamelCase__ : Optional[int] = self.ff(A )
if self.use_ada_layer_norm_zero:
lowerCamelCase__ : Optional[Any] = gate_mlp.unsqueeze(1 ) * ff_output
lowerCamelCase__ : List[Any] = ff_output + hidden_states
return hidden_states
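# A minimal sketch of the feed-forward chunking used above (hypothetical
# shapes; `ff` stands for any position-wise feed-forward module): splitting the
# token axis trades peak activation memory for extra kernel launches, and
# concatenating the per-chunk outputs reproduces the unchunked result because
# the feed-forward acts on each token independently.
#
#   hidden = torch.randn(1, 4096, 320)                # (batch, tokens, dim)
#   chunks = hidden.chunk(4, dim=1)                   # 4 chunks of 1024 tokens
#   out = torch.cat([ff(c) for c in chunks], dim=1)   # same result as ff(hidden)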
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : Any , A : int , A : Optional[int] = None , A : int = 4 , A : float = 0.0 , A : str = "geglu" , A : bool = False , ) ->int:
super().__init__()
lowerCamelCase__ : List[Any] = int(dim * mult )
lowerCamelCase__ : List[Any] = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
lowerCamelCase__ : int = GELU(A , A )
if activation_fn == "gelu-approximate":
lowerCamelCase__ : Optional[int] = GELU(A , A , approximate='''tanh''' )
elif activation_fn == "geglu":
lowerCamelCase__ : Any = GEGLU(A , A )
elif activation_fn == "geglu-approximate":
lowerCamelCase__ : int = ApproximateGELU(A , A )
lowerCamelCase__ : Union[str, Any] = nn.ModuleList([] )
# project in
self.net.append(A )
# project dropout
self.net.append(nn.Dropout(A ) )
# project out
self.net.append(nn.Linear(A , A ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(A ) )
def __lowerCamelCase ( self : Dict , A : List[Any] ) ->Optional[Any]:
for module in self.net:
lowerCamelCase__ : int = module(A )
return hidden_states
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : Tuple , A : int , A : int , A : str = "none" ) ->Optional[Any]:
super().__init__()
lowerCamelCase__ : List[Any] = nn.Linear(A , A )
lowerCamelCase__ : Any = approximate
def __lowerCamelCase ( self : List[str] , A : Tuple ) ->str:
if gate.device.type != "mps":
return F.gelu(A , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.float32 ) , approximate=self.approximate ).to(dtype=gate.dtype )
def __lowerCamelCase ( self : List[str] , A : str ) ->Optional[int]:
lowerCamelCase__ : List[str] = self.proj(A )
lowerCamelCase__ : Optional[int] = self.gelu(A )
return hidden_states
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : Tuple , A : int , A : int ) ->Dict:
super().__init__()
lowerCamelCase__ : Optional[Any] = nn.Linear(A , dim_out * 2 )
def __lowerCamelCase ( self : List[Any] , A : List[Any] ) ->Tuple:
if gate.device.type != "mps":
return F.gelu(A )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.float32 ) ).to(dtype=gate.dtype )
def __lowerCamelCase ( self : Any , A : Union[str, Any] ) ->Any:
lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.proj(A ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(A )
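# The class above implements GEGLU(x) = a * GELU(b), where `a` and `b` are the
# two halves of a single Linear projection to `2 * dim_out`; chunking along the
# last dimension separates the value path from the gate path (Shazeer, 2020,
# "GLU Variants Improve Transformer").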
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : Any , A : int , A : int ) ->str:
super().__init__()
lowerCamelCase__ : Optional[int] = nn.Linear(A , A )
def __lowerCamelCase ( self : Union[str, Any] , A : Dict ) ->Optional[Any]:
lowerCamelCase__ : List[str] = self.proj(A )
return x * torch.sigmoid(1.7_02 * x )
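# Sanity-check sketch for the sigmoid approximation above (note that the
# literal 1.7_02 is just 1.702 written with a digit separator). The helper
# below is an assumption for illustration, not part of the library; it only
# shows that x * sigmoid(1.702 * x) stays within a few hundredths of the
# exact GELU on moderate inputs.
def _approx_gelu_max_abs_error() -> float:
    x = torch.linspace(-4.0, 4.0, steps=101)
    approx = x * torch.sigmoid(1.702 * x)
    exact = F.gelu(x)
    return (approx - exact).abs().max().item()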
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : int , A : Dict , A : Optional[Any] ) ->str:
super().__init__()
lowerCamelCase__ : List[str] = nn.Embedding(A , A )
lowerCamelCase__ : str = nn.SiLU()
lowerCamelCase__ : int = nn.Linear(A , embedding_dim * 2 )
lowerCamelCase__ : Optional[Any] = nn.LayerNorm(A , elementwise_affine=A )
def __lowerCamelCase ( self : int , A : Union[str, Any] , A : Union[str, Any] ) ->Union[str, Any]:
lowerCamelCase__ : Union[str, Any] = self.linear(self.silu(self.emb(A ) ) )
lowerCamelCase__ , lowerCamelCase__ : List[str] = torch.chunk(A , 2 )
lowerCamelCase__ : Any = self.norm(A ) * (1 + scale) + shift
return x
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : str , A : Optional[Any] , A : int ) ->str:
super().__init__()
lowerCamelCase__ : Union[str, Any] = CombinedTimestepLabelEmbeddings(A , A )
lowerCamelCase__ : int = nn.SiLU()
lowerCamelCase__ : List[str] = nn.Linear(A , 6 * embedding_dim , bias=A )
lowerCamelCase__ : str = nn.LayerNorm(A , elementwise_affine=A , eps=1e-6 )
def __lowerCamelCase ( self : List[str] , A : Any , A : List[Any] , A : Tuple , A : Dict=None ) ->Union[str, Any]:
lowerCamelCase__ : List[Any] = self.linear(self.silu(self.emb(A , A , hidden_dtype=A ) ) )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = emb.chunk(6 , dim=1 )
lowerCamelCase__ : List[Any] = self.norm(A ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : Any , A : int , A : int , A : int , A : Optional[str] = None , A : float = 1e-5 ) ->Any:
super().__init__()
lowerCamelCase__ : int = num_groups
lowerCamelCase__ : List[str] = eps
if act_fn is None:
lowerCamelCase__ : Tuple = None
else:
lowerCamelCase__ : Dict = get_activation(A )
lowerCamelCase__ : Any = nn.Linear(A , out_dim * 2 )
def __lowerCamelCase ( self : List[str] , A : Optional[int] , A : str ) ->Tuple:
if self.act:
lowerCamelCase__ : Union[str, Any] = self.act(A )
lowerCamelCase__ : Optional[Any] = self.linear(A )
lowerCamelCase__ : Optional[Any] = emb[:, :, None, None]
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = emb.chunk(2 , dim=1 )
lowerCamelCase__ : str = F.group_norm(A , self.num_groups , eps=self.eps )
lowerCamelCase__ : Dict = x * (1 + scale) + shift
return x
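# The final two lines above are FiLM-style modulation: the embedding is
# projected to twice the channel count, split into (scale, shift), and applied
# as x * (1 + scale) + shift, so a zero embedding leaves the group-normalized
# activations unchanged.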
| 142 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''philschmid/bart-large-cnn-samsum'''
lowerCAmelCase = (
'''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
'''and returns a summary of the text.'''
)
lowerCAmelCase = '''summarizer'''
lowerCAmelCase = AutoTokenizer
lowerCAmelCase = AutoModelForSeqaSeqLM
lowerCAmelCase = ['''text''']
lowerCAmelCase = ['''text''']
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
return self.pre_processor(_UpperCAmelCase , return_tensors='pt' , truncation=_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
return self.model.generate(**_UpperCAmelCase)[0]
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
return self.pre_processor.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase)
| 190 |
'''simple docstring'''
import math
def _lowerCAmelCase ( __snake_case : int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(__snake_case ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
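# Quick sanity checks for is_prime (known values, shown as a sketch):
#   is_prime(2) -> True, is_prime(3) -> True, is_prime(27) -> False,
#   is_prime(87) -> False, is_prime(563) -> True, is_prime(2999) -> True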
def _lowerCAmelCase ( __snake_case : float = 0.1 ) -> int:
__A : Dict = 3
__A : int = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(__snake_case )
j += 2
return j
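# Context for the loop above (Project Euler 58): growing the spiral from side
# length j to j + 2 adds four corners spaced j + 1 apart, the largest being
# (j + 2) ** 2, which is never prime. range(j * j + j + 1, (j + 2) * (j + 2), j + 1)
# visits exactly the other three corners, and 2 * j - 1 is the total count of
# numbers on both diagonals for side length j. The function returns the first
# side length at which the prime ratio on the diagonals drops below `ratio`.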
if __name__ == "__main__":
import doctest
doctest.testmod()
| 190 | 1 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__A : List[Any] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Tuple:
'''simple docstring'''
lowerCAmelCase : int = torch.load(_UpperCAmelCase, map_location='cpu' )
if "model" in sd.keys():
lowerCAmelCase : List[str] = torch.load(_UpperCAmelCase, map_location='cpu' )['model']
# pop unnecessary weights
lowerCAmelCase : Optional[int] = [
'decoder.version',
'decoder.output_projection.weight',
]
for key in keys_to_delete:
if key in sd:
sd.pop(_UpperCAmelCase )
lowerCAmelCase : Any = {
'decoder.project_in_dim.weight': 'decoder.project_in.weight',
'decoder.project_out_dim.weight': 'decoder.project_out.weight',
'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight',
'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
lowerCAmelCase : Union[str, Any] = sd.pop(_UpperCAmelCase )
lowerCAmelCase : List[Any] = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
lowerCAmelCase : Any = sd[key]
# We split QKV in separate Q,K,V
lowerCAmelCase : Optional[int] = key.replace('.qkv_proj.', '.q_proj.' )
lowerCAmelCase : List[str] = key.replace('.qkv_proj.', '.k_proj.' )
lowerCAmelCase : Optional[Any] = key.replace('.qkv_proj.', '.v_proj.' )
lowerCAmelCase : Dict = value.shape[0]
assert depth % 3 == 0
# `SequenceParallelTransformerBlock` stores its fused QKV weight separated as K, V, Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[str] = torch.split(_UpperCAmelCase, depth // 3, dim=0 )
lowerCAmelCase : List[Any] = q
lowerCAmelCase : Optional[int] = k
lowerCAmelCase : Optional[Any] = v
del sd[key]
return sd
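# Toy illustration of the fused-QKV split performed above (hypothetical sizes,
# not taken from a real checkpoint): a fused projection of depth 3 * 64 is cut
# into three equal blocks along dim 0, one per projection.
#
#   qkv = torch.randn(3 * 64, 16)
#   a, b, c = torch.split(qkv, (3 * 64) // 3, dim=0)   # three (64, 16) slices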
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=None ) -> str:
'''simple docstring'''
lowerCAmelCase : Optional[int] = load_checkpoint(_UpperCAmelCase )
if config is not None:
lowerCAmelCase : str = OPTConfig.from_pretrained(_UpperCAmelCase )
else:
lowerCAmelCase : List[Any] = OPTConfig()
lowerCAmelCase : int = OPTModel(_UpperCAmelCase ).half().eval()
model.load_state_dict(_UpperCAmelCase )
# Check results
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
model.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
__A : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
__A : Optional[int] = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 138 |
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase : Dict = cva.getAffineTransform(_UpperCAmelCase, _UpperCAmelCase )
return cva.warpAffine(_UpperCAmelCase, _UpperCAmelCase, (rows, cols) )
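# For reference: cva.getAffineTransform solves for the 2x3 matrix M that maps
# the three source points onto the three destination points, and cva.warpAffine
# then applies M to every pixel. A sketch of how M acts on a single point (this
# helper is illustrative only, not part of the script):
def apply_affine_to_point(matrix: np.ndarray, x: float, y: float):
    new_x = matrix[0, 0] * x + matrix[0, 1] * y + matrix[0, 2]
    new_y = matrix[1, 0] * x + matrix[1, 1] * y + matrix[1, 2]
    return new_x, new_y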
if __name__ == "__main__":
# read original image
__A : List[str] = cva.imread(
str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''')
)
# turn image in gray scale value
__A : int = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
__A , __A : Optional[Any] = gray_img.shape
# set different points to rotate image
__A : int = np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
__A : Any = np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
__A : Optional[int] = np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
__A : List[Any] = np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
# add all rotated images in a list
__A : List[str] = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
__A : Union[str, Any] = plt.figure(1)
__A : Optional[Any] = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3''']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''')
plt.title(titles[i])
plt.axis('''off''')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 138 | 1 |
import sys
lowerCamelCase__ = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def A(__a: str ):
lowerCAmelCase_ = 1
for digit in s:
product *= int(__a )
return product
def A(__a: str = N ):
lowerCAmelCase_ = -sys.maxsize - 1
lowerCAmelCase_ = n[:13]
lowerCAmelCase_ = 13
while cur_index < len(__a ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
lowerCAmelCase_ = substr[1:] + n[cur_index]
cur_index += 1
else:
lowerCAmelCase_ = max(__a , str_eval(__a ) )
lowerCAmelCase_ = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
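# How the window logic above works: `substr` holds the current 13-digit window.
# When the incoming digit is at least as large as the digit about to leave the
# window, the window slides forward by one; otherwise the current window is
# scored with str_eval and the search restarts from a fresh 13-digit window.
# This is the skip-ahead heuristic used by this solution rather than an
# exhaustive scan of every window.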
if __name__ == "__main__":
print(F'''{solution() = }''')
| 352 |
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
lowerCamelCase__ = (
'''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'''
)
def A(__a: str , __a: List[Any] ):
warnings.warn(__a , __a )
requires_backends(__a , "sklearn" )
return (preds == labels).mean()
def A(__a: Any , __a: Any ):
warnings.warn(__a , __a )
requires_backends(__a , "sklearn" )
lowerCAmelCase_ = simple_accuracy(__a , __a )
lowerCAmelCase_ = fa_score(y_true=__a , y_pred=__a )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def A(__a: List[str] , __a: Optional[int] ):
warnings.warn(__a , __a )
requires_backends(__a , "sklearn" )
lowerCAmelCase_ = pearsonr(__a , __a )[0]
lowerCAmelCase_ = spearmanr(__a , __a )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
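# Toy illustration (hypothetical values): for preds = [1, 2, 3] and
# labels = [1, 2, 3], pearsonr and spearmanr both return 1.0, so every entry
# in the returned dict is 1.0.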
def A(__a: Union[str, Any] , __a: Any , __a: str ):
warnings.warn(__a , __a )
requires_backends(__a , "sklearn" )
assert len(__a ) == len(__a ), F"Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}"
if task_name == "cola":
return {"mcc": matthews_corrcoef(__a , __a )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(__a , __a )}
elif task_name == "mrpc":
return acc_and_fa(__a , __a )
elif task_name == "sts-b":
return pearson_and_spearman(__a , __a )
elif task_name == "qqp":
return acc_and_fa(__a , __a )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(__a , __a )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(__a , __a )}
elif task_name == "qnli":
return {"acc": simple_accuracy(__a , __a )}
elif task_name == "rte":
return {"acc": simple_accuracy(__a , __a )}
elif task_name == "wnli":
return {"acc": simple_accuracy(__a , __a )}
elif task_name == "hans":
return {"acc": simple_accuracy(__a , __a )}
else:
raise KeyError(__a )
def A(__a: int , __a: Optional[Any] , __a: Optional[Any] ):
warnings.warn(__a , __a )
requires_backends(__a , "sklearn" )
if len(__a ) != len(__a ):
raise ValueError(F"Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}" )
if task_name == "xnli":
return {"acc": simple_accuracy(__a , __a )}
else:
raise KeyError(__a )
| 22 | 0 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class UpperCAmelCase :
def __init__(self : List[str] , snake_case__ : str = "cpu" , snake_case__ : str = "openai/clip-vit-large-patch14" ) -> None:
'''simple docstring'''
snake_case : int = device
snake_case : str = CLIPTokenizerFast.from_pretrained(snake_case__ )
snake_case : List[Any] = [0.48145466, 0.4578275, 0.40821073]
snake_case : Optional[int] = [0.26862954, 0.26130258, 0.27577711]
snake_case : List[Any] = torchvision.transforms.Normalize(self.image_mean , self.image_std )
snake_case : List[str] = torchvision.transforms.Resize(2_24 )
snake_case : List[Any] = torchvision.transforms.CenterCrop(2_24 )
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case : List[str] = self.resize(snake_case__ )
snake_case : Union[str, Any] = self.center_crop(snake_case__ )
snake_case : str = self.normalize(snake_case__ )
return images
def __call__(self : Optional[int] , snake_case__ : Optional[int]=None , snake_case__ : List[Any]=None , **snake_case__ : int ) -> List[Any]:
'''simple docstring'''
snake_case : Dict = self.tokenizer(text=snake_case__ , **snake_case__ )
snake_case : str = self.preprocess_img(snake_case__ )
snake_case : Any = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class UpperCAmelCase ( nn.Module ):
def __init__(self : int , snake_case__ : Tuple=10 , snake_case__ : List[str]=0.01 , snake_case__ : Dict=None , snake_case__ : List[str]=None , snake_case__ : Dict=None , snake_case__ : Optional[Any]=None , snake_case__ : Union[str, Any]=None , snake_case__ : Dict=None , snake_case__ : Any=False , snake_case__ : Optional[Any]=True , snake_case__ : Any="image" , snake_case__ : str=True , snake_case__ : Any=False , snake_case__ : List[str]=False , snake_case__ : Optional[Any]=False , ) -> None:
'''simple docstring'''
super().__init__()
snake_case : List[str] = None
snake_case : Optional[int] = device if device else get_device()
if vqgan:
snake_case : List[Any] = vqgan
else:
snake_case : Tuple = load_vqgan(self.device , conf_path=snake_case__ , ckpt_path=snake_case__ )
self.vqgan.eval()
if clip:
snake_case : Any = clip
else:
snake_case : Optional[Any] = CLIPModel.from_pretrained("openai/clip-vit-base-patch32" )
self.clip.to(self.device )
snake_case : Optional[Any] = ProcessorGradientFlow(device=self.device )
snake_case : Optional[int] = iterations
snake_case : str = lr
snake_case : List[Any] = log
snake_case : Optional[int] = make_grid
snake_case : str = return_val
snake_case : List[Any] = quantize
snake_case : List[str] = self.vqgan.decoder.z_shape
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : Dict=None , snake_case__ : Tuple=None , snake_case__ : Any=5 , snake_case__ : Optional[Any]=True ) -> Any:
'''simple docstring'''
snake_case : List[Any] = []
if output_path is None:
snake_case : List[Any] = "./animation.gif"
if input_path is None:
snake_case : Any = self.save_path
snake_case : int = sorted(glob(input_path + "/*" ) )
if not len(snake_case__ ):
raise ValueError(
"No images found in save path, aborting (did you pass save_intermediate=True to the generate"
" function?)" )
if len(snake_case__ ) == 1:
print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)" )
snake_case : List[Any] = total_duration / len(snake_case__ )
snake_case : Union[str, Any] = [frame_duration] * len(snake_case__ )
if extend_frames:
snake_case : Union[str, Any] = 1.5
snake_case : int = 3
for file_name in paths:
if file_name.endswith(".png" ):
images.append(imageio.imread(snake_case__ ) )
imageio.mimsave(snake_case__ , snake_case__ , duration=snake_case__ )
print(f"""gif saved to {output_path}""" )
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : str=None , snake_case__ : List[Any]=None ) -> Union[str, Any]:
'''simple docstring'''
if not (path or img):
raise ValueError("Input either path or tensor" )
if img is not None:
raise NotImplementedError
snake_case : Optional[Any] = preprocess(Image.open(snake_case__ ) , target_image_size=2_56 ).to(self.device )
snake_case : Any = preprocess_vqgan(snake_case__ )
snake_case , *snake_case : str = self.vqgan.encode(snake_case__ )
return z
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : Tuple ) -> Optional[Any]:
'''simple docstring'''
snake_case : Dict = self.latent.detach().requires_grad_()
snake_case : Optional[int] = base_latent + transform_vector
if self.quantize:
snake_case , *snake_case : Any = self.vqgan.quantize(snake_case__ )
else:
snake_case : Union[str, Any] = trans_latent
return self.vqgan.decode(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Optional[int]=None ) -> str:
'''simple docstring'''
snake_case : Any = self.clip_preprocessor(text=snake_case__ , images=snake_case__ , return_tensors="pt" , padding=snake_case__ )
snake_case : Optional[int] = self.clip(**snake_case__ )
snake_case : List[Any] = clip_outputs.logits_per_image
if weights is not None:
snake_case : Any = similarity_logits * weights
return similarity_logits.sum()
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Dict ) -> Optional[Any]:
'''simple docstring'''
snake_case : Any = self._get_clip_similarity(pos_prompts["prompts"] , snake_case__ , weights=(1 / pos_prompts["weights"]) )
if neg_prompts:
snake_case : Union[str, Any] = self._get_clip_similarity(neg_prompts["prompts"] , snake_case__ , weights=neg_prompts["weights"] )
else:
snake_case : Union[str, Any] = torch.tensor([1] , device=self.device )
snake_case : List[Any] = -torch.log(snake_case__ ) + torch.log(snake_case__ )
return loss
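# Reading of the loss above: it is -log(pos_similarity) + log(neg_similarity),
# so minimizing it pushes the image toward the positive prompts and away from
# the negative ones. With no negative prompts, the second term is log(1) = 0
# and only the positive attraction remains.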
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : Optional[Any] ) -> str:
'''simple docstring'''
snake_case : Union[str, Any] = torch.randn_like(self.latent , requires_grad=snake_case__ , device=self.device )
snake_case : Optional[int] = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
snake_case : Dict = self._add_vector(snake_case__ )
snake_case : Union[str, Any] = loop_post_process(snake_case__ )
snake_case : List[str] = self._get_CLIP_loss(snake_case__ , snake_case__ , snake_case__ )
print("CLIP loss" , snake_case__ )
if self.log:
wandb.log({"CLIP Loss": clip_loss} )
clip_loss.backward(retain_graph=snake_case__ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
wandb.init(reinit=snake_case__ , project="face-editor" )
wandb.config.update({"Positive Prompts": positive_prompts} )
wandb.config.update({"Negative Prompts": negative_prompts} )
wandb.config.update({"lr": self.lr, "iterations": self.iterations} )
if image_path:
snake_case : Optional[Any] = Image.open(snake_case__ )
snake_case : Dict = image.resize((2_56, 2_56) )
wandb.log("Original Image" , wandb.Image(snake_case__ ) )
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
if not prompts:
return []
snake_case : Tuple = []
snake_case : List[str] = []
if isinstance(snake_case__ , snake_case__ ):
snake_case : Union[str, Any] = [prompt.strip() for prompt in prompts.split("|" )]
for prompt in prompts:
if isinstance(snake_case__ , (tuple, list) ):
snake_case : Tuple = prompt[0]
snake_case : Optional[int] = float(prompt[1] )
elif ":" in prompt:
snake_case , snake_case : Optional[int] = prompt.split(":" )
snake_case : List[str] = float(snake_case__ )
else:
snake_case : Optional[Any] = prompt
snake_case : Tuple = 1.0
processed_prompts.append(snake_case__ )
weights.append(snake_case__ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(snake_case__ , device=self.device ),
}
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : Dict , snake_case__ : Union[str, Any]=None , snake_case__ : str=None , snake_case__ : Union[str, Any]=True , snake_case__ : Any=False , snake_case__ : List[Any]=True , snake_case__ : List[Any]=True , snake_case__ : Dict=None , ) -> Any:
'''simple docstring'''
if image_path:
snake_case : List[Any] = self._get_latent(snake_case__ )
else:
snake_case : Optional[int] = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(snake_case__ , snake_case__ , snake_case__ )
assert pos_prompts, "You must provide at least one positive prompt."
snake_case : Optional[Any] = self.process_prompts(snake_case__ )
snake_case : List[Any] = self.process_prompts(snake_case__ )
if save_final and save_path is None:
snake_case : Tuple = os.path.join("./outputs/" , "_".join(pos_prompts["prompts"] ) )
if not os.path.exists(snake_case__ ):
os.makedirs(snake_case__ )
else:
snake_case : int = save_path + "_" + get_timestamp()
os.makedirs(snake_case__ )
snake_case : Tuple = save_path
snake_case : str = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("Original Image" )
show_pil(custom_to_pil(snake_case__ ) )
snake_case : List[str] = loop_post_process(snake_case__ )
for iter, transformed_img in enumerate(self._optimize_CLIP(snake_case__ , snake_case__ , snake_case__ ) ):
if show_intermediate:
show_pil(snake_case__ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}.png""" ) )
if self.log:
wandb.log({"Image": wandb.Image(snake_case__ )} )
if show_final:
show_pil(snake_case__ )
if save_final:
transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}_final.png""" ) )
| 59 |
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def a__ ( snake_case__ , snake_case__ , snake_case__ ) -> List[Any]:
return params[F'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :]
def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__="attention" ) -> List[Any]:
lowerCamelCase = lowerCamelCase = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] )
lowerCamelCase = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
lowerCamelCase = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] )
lowerCamelCase = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
lowerCamelCase = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] )
lowerCamelCase = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
lowerCamelCase = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] )
lowerCamelCase = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
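# Shape bookkeeping for the lookups above (a sketch): T5X stores the key,
# query, and value kernels per layer as (d_model, num_heads, d_head), and the
# reshapes flatten them to the (d_model, num_heads * d_head) layout expected
# by the PyTorch Linear weights; the output kernel is flattened on its first
# two axes instead.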
def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ) -> List[str]:
if split_mlp_wi:
lowerCamelCase = params[F'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :]
lowerCamelCase = params[F'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :]
lowerCamelCase = (wi_a, wi_a)
else:
lowerCamelCase = params[F'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :]
lowerCamelCase = params[F'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :]
return wi, wo
def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> Tuple:
return params[F'{prefix}/{prefix}/{layer_name}/scale'][:, i]
def a__ ( snake_case__ , *, snake_case__ , snake_case__ , snake_case__ = False ) -> Dict:
lowerCamelCase = traverse_util.flatten_dict(variables["""target"""] )
lowerCamelCase = {"""/""".join(snake_case__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
lowerCamelCase = """encoder/encoder/mlp/wi_0/kernel""" in old
print("""Split MLP:""" , snake_case__ )
lowerCamelCase = collections.OrderedDict()
# Shared embeddings.
lowerCamelCase = old["""token_embedder/embedding"""]
# Encoder.
for i in range(snake_case__ ):
# Block i, layer 0 (Self Attention).
lowerCamelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , """encoder""" , """pre_attention_layer_norm""" )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = tax_attention_lookup(snake_case__ , snake_case__ , """encoder""" , """attention""" )
lowerCamelCase = layer_norm
lowerCamelCase = k.T
lowerCamelCase = o.T
lowerCamelCase = q.T
lowerCamelCase = v.T
# Block i, layer 1 (MLP).
lowerCamelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , """encoder""" , """pre_mlp_layer_norm""" )
lowerCamelCase , lowerCamelCase = tax_mlp_lookup(snake_case__ , snake_case__ , """encoder""" , snake_case__ )
lowerCamelCase = layer_norm
if split_mlp_wi:
lowerCamelCase = wi[0].T
lowerCamelCase = wi[1].T
else:
lowerCamelCase = wi.T
lowerCamelCase = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowerCamelCase = tax_relpos_bias_lookup(
snake_case__ , snake_case__ , """encoder""" ).T
lowerCamelCase = old["""encoder/encoder_norm/scale"""]
if not scalable_attention:
lowerCamelCase = tax_relpos_bias_lookup(
snake_case__ , 0 , """encoder""" ).T
lowerCamelCase = tax_relpos_bias_lookup(
snake_case__ , 0 , """decoder""" ).T
if not is_encoder_only:
# Decoder.
for i in range(snake_case__ ):
# Block i, layer 0 (Self Attention).
lowerCamelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , """decoder""" , """pre_self_attention_layer_norm""" )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = tax_attention_lookup(snake_case__ , snake_case__ , """decoder""" , """self_attention""" )
lowerCamelCase = layer_norm
lowerCamelCase = k.T
lowerCamelCase = o.T
lowerCamelCase = q.T
lowerCamelCase = v.T
# Block i, layer 1 (Cross Attention).
lowerCamelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , """decoder""" , """pre_cross_attention_layer_norm""" )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = tax_attention_lookup(snake_case__ , snake_case__ , """decoder""" , """encoder_decoder_attention""" )
lowerCamelCase = layer_norm
lowerCamelCase = k.T
lowerCamelCase = o.T
lowerCamelCase = q.T
lowerCamelCase = v.T
# Block i, layer 2 (MLP).
lowerCamelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , """decoder""" , """pre_mlp_layer_norm""" )
lowerCamelCase , lowerCamelCase = tax_mlp_lookup(snake_case__ , snake_case__ , """decoder""" , snake_case__ )
lowerCamelCase = layer_norm
if split_mlp_wi:
lowerCamelCase = wi[0].T
lowerCamelCase = wi[1].T
else:
lowerCamelCase = wi.T
lowerCamelCase = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowerCamelCase = tax_relpos_bias_lookup(snake_case__ , snake_case__ , """decoder""" ).T
lowerCamelCase = old["""decoder/decoder_norm/scale"""]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
lowerCamelCase = old["""decoder/logits_dense/kernel"""].T
return new
def a__ ( snake_case__ , snake_case__ ) -> Optional[int]:
lowerCamelCase = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
lowerCamelCase = state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
lowerCamelCase = state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
lowerCamelCase = state_dict["""shared.weight"""]
return state_dict
def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> List[Any]:
lowerCamelCase = checkpoints.load_tax_checkpoint(snake_case__ )
lowerCamelCase = convert_tax_to_pytorch(
snake_case__ , num_layers=config.num_layers , is_encoder_only=snake_case__ , scalable_attention=snake_case__ )
lowerCamelCase = make_state_dict(snake_case__ , snake_case__ )
model.load_state_dict(snake_case__ , strict=snake_case__ )
def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = False , snake_case__ = False , ) -> str:
lowerCamelCase = MTaConfig.from_json_file(snake_case__ )
print(F'Building PyTorch model from configuration: {config}' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
lowerCamelCase = UMTaEncoderModel(snake_case__ )
else:
lowerCamelCase = UMTaForConditionalGeneration(snake_case__ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(snake_case__ )
# Verify that we can load the checkpoint.
model.from_pretrained(snake_case__ )
print("""Done""" )
if __name__ == "__main__":
lowerCAmelCase : Optional[int] = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
lowerCAmelCase : int = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 291 | 0 |
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
_lowercase : List[str] = "path-to-your-trained-model"
_lowercase : Any = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("cuda")
_lowercase : Union[str, Any] = "A photo of sks dog in a bucket"
_lowercase : Dict = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
| 21 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
_lowercase : int = logging.get_logger(__name__)
@dataclass
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
def __init__( self : Optional[Any] , **lowercase_ : int ):
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
lowercase_ : Optional[int] = deprecated_arg[3:]
setattr(self , lowercase_ , not kwargs.pop(lowercase_ ) )
logger.warning(
f'''{deprecated_arg} is depreciated. Please use --no_{positive_arg} or'''
f''' {positive_arg}={kwargs[positive_arg]}''' )
lowercase_ : Tuple = kwargs.pop("""torchscript""" , self.torchscript )
lowercase_ : List[Any] = kwargs.pop("""torch_xla_tpu_print_metrics""" , self.torch_xla_tpu_print_metrics )
lowercase_ : List[Any] = kwargs.pop("""fp16_opt_level""" , self.fpaa_opt_level )
super().__init__(**lowercase_ )
UpperCamelCase__ = field(default=_UpperCAmelCase, metadata={'''help''': '''Trace the models using torchscript'''})
UpperCamelCase__ = field(default=_UpperCAmelCase, metadata={'''help''': '''Print Xla/PyTorch tpu metrics'''})
UpperCamelCase__ = field(
default='''O1''', metadata={
'''help''': (
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '''
'''See details at https://nvidia.github.io/apex/amp.html'''
)
}, )
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
requires_backends(self , ["""torch"""] )
logger.info("""PyTorch: setting up devices""" )
if not self.cuda:
lowercase_ : Optional[Any] = torch.device("""cpu""" )
lowercase_ : Tuple = 0
elif is_torch_tpu_available():
lowercase_ : Optional[int] = xm.xla_device()
lowercase_ : str = 0
else:
lowercase_ : int = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
lowercase_ : str = torch.cuda.device_count()
return device, n_gpu
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
return is_torch_tpu_available() and self.tpu
@property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
requires_backends(self , ["""torch"""] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
requires_backends(self , ["""torch"""] )
return self._setup_devices[0]
@property
def SCREAMING_SNAKE_CASE_ ( self : int ):
requires_backends(self , ["""torch"""] )
return self._setup_devices[1]
@property
def SCREAMING_SNAKE_CASE_ ( self : int ):
return self.n_gpu > 0
| 21 | 1 |
from __future__ import annotations
import os
from collections.abc import Mapping
lowerCAmelCase__ = tuple[int, int]
class snake_case__:
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase__ : Optional[int] = vertices
lowercase__ : int = {
(min(__snake_case ), max(__snake_case )): weight for edge, weight in edges.items()
}
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Any ):
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
lowercase__ : List[str] = weight
def snake_case ( self : int ):
lowercase__ : Dict = Graph({min(self.vertices )} , {} )
lowercase__ : Optional[int] = 42
lowercase__ : Optional[Any] = 42
lowercase__ : int = 42
lowercase__ : int = 42
while len(subgraph.vertices ) < len(self.vertices ):
lowercase__ : int = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
lowercase__ : Tuple = edge
lowercase__ : Any = weight
subgraph.add_edge(__snake_case , __snake_case )
return subgraph
def __lowerCamelCase ( lowerCamelCase__ = "p107_network.txt" ):
"""simple docstring"""
lowercase__ : int = os.path.abspath(os.path.dirname(_snake_case ) )
lowercase__ : str = os.path.join(_snake_case , _snake_case )
lowercase__ : Optional[Any] = {}
lowercase__ : Tuple = 42
lowercase__ : int = 42
lowercase__ : Union[str, Any] = 42
with open(_snake_case ) as f:
lowercase__ : int = f.read().strip().split("\n" )
lowercase__ : Optional[int] = [line.split("," ) for line in data]
for edgea in range(1 , len(_snake_case ) ):
for edgea in range(_snake_case ):
if adjaceny_matrix[edgea][edgea] != "-":
lowercase__ : Dict = int(adjaceny_matrix[edgea][edgea] )
lowercase__ : str = Graph(set(range(len(_snake_case ) ) ) , _snake_case )
lowercase__ : Optional[int] = graph.prims_algorithm()
lowercase__ : Union[str, Any] = sum(graph.edges.values() )
lowercase__ : Optional[int] = sum(subgraph.edges.values() )
return initial_total - optimal_total
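# The answer is the saving achieved by replacing the full network with its
# minimum spanning tree: the total weight of all edges minus the weight of the
# tree returned by prims_algorithm. A quick sketch of the call (assuming the
# bundled p107_network.txt sits next to this file):
#
#   saving = solution("p107_network.txt")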
if __name__ == "__main__":
print(f'''{solution() = }''')
| 130 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def UpperCamelCase_( _snake_case : int , _snake_case : int ):
"""simple docstring"""
__a =old_name
if "patch_embed" in old_name:
__a , __a , __a =old_name.split('.' )
if layer == "0":
__a =old_name.replace('0' , 'convolution1' )
elif layer == "1":
__a =old_name.replace('1' , 'batchnorm_before' )
elif layer == "3":
__a =old_name.replace('3' , 'convolution2' )
else:
__a =old_name.replace('4' , 'batchnorm_after' )
if "network" in old_name and re.search(r'\d\.\d' , _snake_case ):
__a =r'\b\d{2}\b'
if bool(re.search(_snake_case , _snake_case ) ):
__a =re.search(r'\d\.\d\d.' , _snake_case ).group()
else:
__a =re.search(r'\d\.\d.' , _snake_case ).group()
if int(match[0] ) < 6:
__a =old_name.replace(_snake_case , '' )
__a =trimmed_name.replace('network' , match[0] + '.meta4D_layers.blocks.' + match[2:-1] )
__a ='intermediate_stages.' + trimmed_name
else:
__a =old_name.replace(_snake_case , '' )
if int(match[2] ) < num_meta4D_last_stage:
__a =trimmed_name.replace('network' , 'meta4D_layers.blocks.' + match[2] )
else:
__a =str(int(match[2] ) - num_meta4D_last_stage )
__a =trimmed_name.replace('network' , 'meta3D_layers.blocks.' + layer_index )
if "norm1" in old_name:
__a =trimmed_name.replace('norm1' , 'layernorm1' )
elif "norm2" in old_name:
__a =trimmed_name.replace('norm2' , 'layernorm2' )
elif "fc1" in old_name:
__a =trimmed_name.replace('fc1' , 'linear_in' )
elif "fc2" in old_name:
__a =trimmed_name.replace('fc2' , 'linear_out' )
__a ='last_stage.' + trimmed_name
elif "network" in old_name and re.search(r'.\d.' , _snake_case ):
__a =old_name.replace('network' , 'intermediate_stages' )
if "fc" in new_name:
__a =new_name.replace('fc' , 'convolution' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
__a =new_name.replace('norm1' , 'batchnorm_before' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
__a =new_name.replace('norm2' , 'batchnorm_after' )
if "proj" in new_name:
__a =new_name.replace('proj' , 'projection' )
if "dist_head" in new_name:
__a =new_name.replace('dist_head' , 'distillation_classifier' )
elif "head" in new_name:
__a =new_name.replace('head' , 'classifier' )
elif "patch_embed" in new_name:
__a ='efficientformer.' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
__a =new_name.replace('norm' , 'layernorm' )
__a ='efficientformer.' + new_name
else:
__a ='efficientformer.encoder.' + new_name
return new_name
def UpperCamelCase_( _snake_case : List[str] , _snake_case : Dict ):
"""simple docstring"""
for key in checkpoint.copy().keys():
__a =checkpoint.pop(_snake_case )
__a =val
return checkpoint
def UpperCamelCase_( ):
"""simple docstring"""
__a ='http://images.cocodataset.org/val2017/000000039769.jpg'
__a =Image.open(requests.get(_snake_case , stream=_snake_case ).raw )
return image
def UpperCamelCase_( _snake_case : Path , _snake_case : Path , _snake_case : Path , _snake_case : bool ):
"""simple docstring"""
__a =torch.load(_snake_case , map_location='cpu' )['model']
__a =EfficientFormerConfig.from_json_file(_snake_case )
__a =EfficientFormerForImageClassificationWithTeacher(_snake_case )
__a ='_'.join(checkpoint_path.split('/' )[-1].split('.' )[0].split('_' )[:-1] )
__a =config.depths[-1] - config.num_metaad_blocks + 1
__a =convert_torch_checkpoint(_snake_case , _snake_case )
model.load_state_dict(_snake_case )
model.eval()
__a ={
'bilinear': PILImageResampling.BILINEAR,
'bicubic': PILImageResampling.BICUBIC,
'nearest': PILImageResampling.NEAREST,
}
# prepare image
__a =prepare_img()
__a =256
__a =224
__a =EfficientFormerImageProcessor(
size={'shortest_edge': image_size} , crop_size={'height': crop_size, 'width': crop_size} , resample=pillow_resamplings['bicubic'] , )
__a =processor(images=_snake_case , return_tensors='pt' ).pixel_values
# original processing pipeline
__a =Compose(
[
Resize(_snake_case , interpolation=pillow_resamplings['bicubic'] ),
CenterCrop(_snake_case ),
ToTensor(),
Normalize(_snake_case , _snake_case ),
] )
__a =image_transforms(_snake_case ).unsqueeze(0 )
assert torch.allclose(_snake_case , _snake_case )
__a =model(_snake_case )
__a =outputs.logits
__a =(1, 1000)
if "l1" in model_name:
__a =torch.Tensor(
[-0.1_312, 0.4_353, -1.0_499, -0.5_124, 0.4_183, -0.6_793, -1.3_777, -0.0_893, -0.7_358, -2.4_328] )
assert torch.allclose(logits[0, :10] , _snake_case , atol=1e-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
__a =torch.Tensor(
[-1.3_150, -1.5_456, -1.2_556, -0.8_496, -0.7_127, -0.7_897, -0.9_728, -0.3_052, 0.3_751, -0.3_127] )
assert torch.allclose(logits[0, :10] , _snake_case , atol=1e-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
__a =torch.Tensor(
[-1.0_283, -1.4_131, -0.5_644, -1.3_115, -0.5_785, -1.2_049, -0.7_528, 0.1_992, -0.3_822, -0.0_878] )
assert logits.shape == expected_shape
else:
raise ValueError(
F'Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7' )
# Save Checkpoints
Path(_snake_case ).mkdir(exist_ok=_snake_case )
model.save_pretrained(_snake_case )
print(F'Checkpoint successfully converted. Model saved at {pytorch_dump_path}' )
    processor.save_pretrained(_snake_case )
    print(F'Processor successfully saved at {pytorch_dump_path}' )
if push_to_hub:
print('Pushing model to the hub...' )
model.push_to_hub(
repo_id=F'Bearnardd/{pytorch_dump_path}' , commit_message='Add model' , use_temp_dir=_snake_case , )
processor.push_to_hub(
repo_id=F'Bearnardd/{pytorch_dump_path}' , commit_message='Add image processor' , use_temp_dir=_snake_case , )
if __name__ == "__main__":
_lowerCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
_lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 218 | 0 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def _A ( UpperCamelCase_ : Optional[int]) -> List[str]:
'''simple docstring'''
__lowercase = 384
if "tiny" in model_name:
__lowercase = [3, 3, 9, 3]
__lowercase = [96, 192, 384, 768]
if "small" in model_name:
__lowercase = [3, 3, 27, 3]
__lowercase = [96, 192, 384, 768]
if "base" in model_name:
__lowercase = [3, 3, 27, 3]
__lowercase = [128, 256, 512, 1024]
__lowercase = 512
if "large" in model_name:
__lowercase = [3, 3, 27, 3]
__lowercase = [192, 384, 768, 1536]
__lowercase = 768
if "xlarge" in model_name:
__lowercase = [3, 3, 27, 3]
__lowercase = [256, 512, 1024, 2048]
__lowercase = 1024
# set label information
__lowercase = 150
__lowercase = "huggingface/label-files"
__lowercase = "ade20k-id2label.json"
__lowercase = json.load(open(hf_hub_download(UpperCamelCase_, UpperCamelCase_, repo_type="dataset"), "r"))
__lowercase = {int(UpperCamelCase_): v for k, v in idalabel.items()}
__lowercase = {v: k for k, v in idalabel.items()}
__lowercase = ConvNextConfig(
depths=UpperCamelCase_, hidden_sizes=UpperCamelCase_, out_features=["stage1", "stage2", "stage3", "stage4"])
__lowercase = UperNetConfig(
backbone_config=UpperCamelCase_, auxiliary_in_channels=UpperCamelCase_, num_labels=UpperCamelCase_, idalabel=UpperCamelCase_, labelaid=UpperCamelCase_, )
return config
def _A ( UpperCamelCase_ : List[str]) -> str:
'''simple docstring'''
__lowercase = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight"))
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias"))
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight"))
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias"))
# stages
for i in range(len(config.backbone_config.depths)):
for j in range(config.backbone_config.depths[i]):
rename_keys.append((F"""backbone.stages.{i}.{j}.gamma""", F"""backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter"""))
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.weight"""))
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.bias"""))
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.weight"""))
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.bias"""))
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight"""))
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias"""))
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight"""))
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias"""))
if i > 0:
rename_keys.append((F"""backbone.downsample_layers.{i}.0.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.weight"""))
rename_keys.append((F"""backbone.downsample_layers.{i}.0.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.bias"""))
rename_keys.append((F"""backbone.downsample_layers.{i}.1.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.weight"""))
rename_keys.append((F"""backbone.downsample_layers.{i}.1.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.bias"""))
rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight"""))
rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias"""))
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
])
# fmt: on
return rename_keys
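# create_rename_keys yields pairs such as
# ("backbone.stages.0.0.gamma", "backbone.encoder.stages.0.layers.0.layer_scale_parameter")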
def _A ( UpperCamelCase_ : Any, UpperCamelCase_ : List[str], UpperCamelCase_ : Tuple) -> List[str]:
'''simple docstring'''
__lowercase = dct.pop(UpperCamelCase_)
__lowercase = val
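# rename_key pops the tensor stored under the old name and re-inserts it under the new one.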
def _A ( UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[str], UpperCamelCase_ : Tuple) -> Union[str, Any]:
'''simple docstring'''
__lowercase = {
"upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
"upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
"upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
"upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
"upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
}
__lowercase = model_name_to_url[model_name]
__lowercase = torch.hub.load_state_dict_from_url(UpperCamelCase_, map_location="cpu")["state_dict"]
__lowercase = get_upernet_config(UpperCamelCase_)
__lowercase = UperNetForSemanticSegmentation(UpperCamelCase_)
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
        __lowercase = state_dict.pop(key)
if "bn" in key:
__lowercase = key.replace("bn", "batch_norm")
__lowercase = val
# rename keys
__lowercase = create_rename_keys(UpperCamelCase_)
for src, dest in rename_keys:
rename_key(UpperCamelCase_, UpperCamelCase_, UpperCamelCase_)
model.load_state_dict(UpperCamelCase_)
# verify on image
__lowercase = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
__lowercase = Image.open(requests.get(UpperCamelCase_, stream=UpperCamelCase_).raw).convert("RGB")
__lowercase = SegformerImageProcessor()
__lowercase = processor(UpperCamelCase_, return_tensors="pt").pixel_values
with torch.no_grad():
__lowercase = model(UpperCamelCase_)
if model_name == "upernet-convnext-tiny":
__lowercase = torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]])
elif model_name == "upernet-convnext-small":
__lowercase = torch.tensor(
[[-8.8_236, -8.8_236, -8.6_771], [-8.8_236, -8.8_236, -8.6_771], [-8.7_638, -8.7_638, -8.6_240]])
elif model_name == "upernet-convnext-base":
__lowercase = torch.tensor(
[[-8.8_558, -8.8_558, -8.6_905], [-8.8_558, -8.8_558, -8.6_905], [-8.7_669, -8.7_669, -8.6_021]])
elif model_name == "upernet-convnext-large":
__lowercase = torch.tensor(
[[-8.6_660, -8.6_660, -8.6_210], [-8.6_660, -8.6_660, -8.6_210], [-8.6_310, -8.6_310, -8.5_964]])
elif model_name == "upernet-convnext-xlarge":
__lowercase = torch.tensor(
[[-8.4_980, -8.4_980, -8.3_977], [-8.4_980, -8.4_980, -8.3_977], [-8.4_379, -8.4_379, -8.3_412]])
print("Logits:", outputs.logits[0, 0, :3, :3])
assert torch.allclose(outputs.logits[0, 0, :3, :3], UpperCamelCase_, atol=1E-4)
print("Looks ok!")
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""")
model.save_pretrained(UpperCamelCase_)
print(F"""Saving processor to {pytorch_dump_folder_path}""")
processor.save_pretrained(UpperCamelCase_)
if push_to_hub:
print(F"""Pushing model and processor for {model_name} to hub""")
model.push_to_hub(F"""openmmlab/{model_name}""")
processor.push_to_hub(F"""openmmlab/{model_name}""")
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[F"upernet-convnext-{size}" for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_a = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 144 |
"""simple docstring"""
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
_a = argparse.ArgumentParser()
parser.add_argument('--user', type=str, default='ubuntu')
parser.add_argument('--host', type=str, default='localhost')
parser.add_argument('--key_path', type=str, default=None)
parser.add_argument('--instance', type=str, default='V100:1')
parser.add_argument('--provider', type=str, default='cheapest')
parser.add_argument('--use_spot', type=bool, default=False)
parser.add_argument('--example', type=str, default='pytorch/text-generation/run_generation.py')
_a , _a = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('Cannot specify both BYO and on-demand cluster args')
_a = rh.cluster(
name='rh-cluster', ips=[args.host], ssh_creds={'ssh_user': args.user, 'ssh_private_key': args.key_path}
)
else:
_a = rh.cluster(
name='rh-cluster', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
_a = args.example.rsplit('/', 1)[0]
# Set up remote environment
cluster.install_packages(['pip:./']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F"pip install -r transformers/examples/{example_dir}/requirements.txt"])
cluster.run(['pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 144 | 1 |
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE :
@staticmethod
def SCREAMING_SNAKE_CASE ( *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
pass
def _lowerCAmelCase ( __snake_case : Image ) -> str:
    __A : Dict = hashlib.md5(image.tobytes() )
    return __A.hexdigest()[:10]
def _lowerCAmelCase ( __snake_case : Image ) -> Dict:
__A : Dict = np.array(__snake_case )
__A : List[Any] = npimg.shape
return {"hash": hashimage(__snake_case ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
lowerCAmelCase = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
lowerCAmelCase = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = MaskGenerationPipeline(model=_UpperCAmelCase , image_processor=_UpperCAmelCase)
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
@slow
@require_torch
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = pipeline('mask-generation' , model='facebook/sam-vit-huge')
__A : Tuple = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' , points_per_batch=256)
# Shortening by hashing
__A : int = []
for i, o in enumerate(outputs['masks']):
            __A += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
            nested_simplify(__A , decimals=4) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0444},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.021},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0132},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0053},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9967},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.993},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9909},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9879},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9834},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9716},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9612},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9599},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9552},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9532},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9516},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9499},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9483},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9464},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.943},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.943},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9408},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9335},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9326},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9262},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8999},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8986},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8984},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8873},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8871}
] , )
# fmt: on
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = 'facebook/sam-vit-huge'
__A : Optional[Any] = pipeline('mask-generation' , model=_UpperCAmelCase)
__A : int = image_segmenter(
'http://images.cocodataset.org/val2017/000000039769.jpg' , pred_iou_thresh=1 , points_per_batch=256)
# Shortening by hashing
__A : int = []
for i, o in enumerate(outputs['masks']):
            __A += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
self.assertEqual(
            nested_simplify(__A , decimals=4) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0444},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0210},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0132},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0053},
            ] , )
| 190 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ : List[str] = {
'''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
'''processing_git''': ['''GitProcessor'''],
}
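# The torch-only modeling classes are imported lazily, on first attribute access, via the
# _LazyModule registered below.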
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Any = [
'''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GitForCausalLM''',
'''GitModel''',
'''GitPreTrainedModel''',
'''GitVisionModel''',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    lowercase__ : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 190 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def snake_case__ ( self : str ):
__magic_name__ = tempfile.mkdtemp()
__magic_name__ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
__magic_name__ = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48_145_466, 0.4_578_275, 0.40_821_073],
'''image_std''': [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
        __magic_name__ = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def snake_case__ ( self : Dict , **a__ : Dict ):
return BertTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def snake_case__ ( self : Any , **a__ : int ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def snake_case__ ( self : Dict , **a__ : List[str] ):
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def snake_case__ ( self : int ):
shutil.rmtree(self.tmpdirname )
def snake_case__ ( self : Optional[Any] ):
        __magic_name__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        __magic_name__ = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case__ ( self : List[Any] ):
__magic_name__ = self.get_tokenizer()
__magic_name__ = self.get_rust_tokenizer()
__magic_name__ = self.get_image_processor()
__magic_name__ = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
processor_slow.save_pretrained(self.tmpdirname )
__magic_name__ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_SCREAMING_SNAKE_CASE )
__magic_name__ = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
processor_fast.save_pretrained(self.tmpdirname )
__magic_name__ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.image_processor , _SCREAMING_SNAKE_CASE )
def snake_case__ ( self : Optional[Any] ):
__magic_name__ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__magic_name__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__magic_name__ = self.get_image_processor(do_normalize=_SCREAMING_SNAKE_CASE , padding_value=1.0 )
__magic_name__ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _SCREAMING_SNAKE_CASE )
def snake_case__ ( self : Optional[int] ):
__magic_name__ = self.get_image_processor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
__magic_name__ = self.prepare_image_inputs()
__magic_name__ = image_processor(_SCREAMING_SNAKE_CASE , return_tensors='''np''' )
__magic_name__ = processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def snake_case__ ( self : Optional[int] ):
__magic_name__ = self.get_image_processor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
__magic_name__ = '''lower newer'''
__magic_name__ = processor(text=_SCREAMING_SNAKE_CASE )
__magic_name__ = tokenizer(_SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case__ ( self : str ):
__magic_name__ = self.get_image_processor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
__magic_name__ = '''lower newer'''
__magic_name__ = self.prepare_image_inputs()
__magic_name__ = processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_SCREAMING_SNAKE_CASE ):
processor()
def snake_case__ ( self : Union[str, Any] ):
__magic_name__ = self.get_image_processor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
__magic_name__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__magic_name__ = processor.batch_decode(_SCREAMING_SNAKE_CASE )
__magic_name__ = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def snake_case__ ( self : Dict ):
__magic_name__ = self.get_image_processor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
__magic_name__ = '''lower newer'''
__magic_name__ = self.prepare_image_inputs()
__magic_name__ = processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 371 |
'''simple docstring'''
from random import randint, random
def UpperCamelCase ( a , a , a , a = False , a = False , a = 5 , ) -> list:
'''simple docstring'''
__magic_name__ = [[-1] * number_of_cells] # Create a highway without any car
__magic_name__ = 0
__magic_name__ = max(a , 0 )
while i < number_of_cells:
__magic_name__ = (
randint(0 , a ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
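# e.g. 6 cells, frequency 3 and initial speed 0 should produce [[0, -1, -1, 0, -1, -1]]:
# a speed-0 car placed every third cell.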
def UpperCamelCase ( a , a ) -> int:
'''simple docstring'''
__magic_name__ = 0
__magic_name__ = highway_now[car_index + 1 :]
for cell in range(len(a ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(a , -1 )
def UpperCamelCase ( a , a , a ) -> list:
'''simple docstring'''
__magic_name__ = len(a )
    # Before calculations, the highway is empty
__magic_name__ = [-1] * number_of_cells
for car_index in range(a ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
__magic_name__ = min(highway_now[car_index] + 1 , a )
# Number of empty cell before the next car
__magic_name__ = get_distance(a , a ) - 1
# We can't have the car causing an accident
__magic_name__ = min(next_highway[car_index] , a )
if random() < probability:
# Randomly, a driver will slow down
__magic_name__ = max(next_highway[car_index] - 1 , 0 )
return next_highway
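# update applies the three Nagel-Schreckenberg rules: accelerate by one up to max_speed,
# brake to keep distance to the car ahead, then randomly slow down with the given probability.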
def UpperCamelCase ( a , a , a , a ) -> list:
'''simple docstring'''
__magic_name__ = len(highway[0] )
for i in range(a ):
__magic_name__ = update(highway[i] , a , a )
__magic_name__ = [-1] * number_of_cells
for car_index in range(a ):
__magic_name__ = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
__magic_name__ = (car_index + speed) % number_of_cells
# Commit the change of position
__magic_name__ = speed
highway.append(a )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 | 0 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=sys.maxsize ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = 'bilinear'
__lowerCamelCase = max_size
__lowerCamelCase = short_edge_length
def __call__( self , lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = []
for img in imgs:
__lowerCamelCase , __lowerCamelCase = img.shape[:2]
# later: provide list and randomly choose index for resize
__lowerCamelCase = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
__lowerCamelCase = size * 1.0 / min(lowerCamelCase__ , lowerCamelCase__ )
if h < w:
__lowerCamelCase , __lowerCamelCase = size, scale * w
else:
__lowerCamelCase , __lowerCamelCase = scale * h, size
if max(lowerCamelCase__ , lowerCamelCase__ ) > self.max_size:
__lowerCamelCase = self.max_size * 1.0 / max(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = newh * scale
__lowerCamelCase = neww * scale
__lowerCamelCase = int(neww + 0.5 )
__lowerCamelCase = int(newh + 0.5 )
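            # adding 0.5 before int() rounds the scaled size to the nearest pixel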
            if img.dtype == np.uint8:
__lowerCamelCase = Image.fromarray(lowerCamelCase__ )
__lowerCamelCase = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
__lowerCamelCase = np.asarray(lowerCamelCase__ )
else:
                __lowerCamelCase = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # hw(c) -> nchw
__lowerCamelCase = nn.functional.interpolate(
lowerCamelCase__ , (newh, neww) , mode=self.interp_method , align_corners=lowerCamelCase__ ).squeeze(0 )
img_augs.append(lowerCamelCase__ )
return img_augs
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
__lowerCamelCase = cfg.INPUT.FORMAT
__lowerCamelCase = cfg.SIZE_DIVISIBILITY
__lowerCamelCase = cfg.PAD_VALUE
__lowerCamelCase = cfg.INPUT.MAX_SIZE_TEST
__lowerCamelCase = cfg.MODEL.DEVICE
__lowerCamelCase = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
__lowerCamelCase = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
__lowerCamelCase = lambda lowerCamelCase__ : (x - self.pixel_mean) / self.pixel_std
def lowercase_ ( self , lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
        __lowerCamelCase = tuple(max(s ) for s in zip(*[img.shape for img in images] ) )
__lowerCamelCase = [im.shape[-2:] for im in images]
__lowerCamelCase = [
nn.functional.pad(
                im , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(lowerCamelCase__ , lowerCamelCase__ )
]
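        # each image is padded at the bottom/right to the batch-wide maximum H and W, then
        # stacked into one tensor; the original sizes are returned alongside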
return torch.stack(lowerCamelCase__ ), torch.tensor(lowerCamelCase__ )
def __call__( self , lowerCamelCase__ , lowerCamelCase__=False ) -> List[Any]:
'''simple docstring'''
with torch.no_grad():
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
__lowerCamelCase = [images]
if single_image:
assert len(lowerCamelCase__ ) == 1
for i in range(len(lowerCamelCase__ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(lowerCamelCase__ , images.pop(lowerCamelCase__ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
lowerCamelCase__ , torch.as_tensor(img_tensorize(images.pop(lowerCamelCase__ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
__lowerCamelCase = torch.tensor([im.shape[:2] for im in images] )
__lowerCamelCase = self.aug(lowerCamelCase__ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
            __lowerCamelCase = [self.normalizer(x ) for x in images]
# now pad them to do the following operations
__lowerCamelCase , __lowerCamelCase = self.pad(lowerCamelCase__ )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
__lowerCamelCase = torch.true_divide(lowerCamelCase__ , lowerCamelCase__ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def lowerCamelCase_ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : str ) -> List[Any]:
"""simple docstring"""
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def lowerCamelCase_ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple[int, int] ) -> Dict:
"""simple docstring"""
assert torch.isfinite(UpperCamelCase__ ).all(), "Box tensor contains infinite or NaN!"
__lowerCamelCase , __lowerCamelCase = box_size
tensor[:, 0].clamp_(min=0 , max=UpperCamelCase__ )
tensor[:, 1].clamp_(min=0 , max=UpperCamelCase__ )
tensor[:, 2].clamp_(min=0 , max=UpperCamelCase__ )
tensor[:, 3].clamp_(min=0 , max=UpperCamelCase__ )
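    # the four clamps above are meant to bound x coordinates (columns 0 and 2) by the image
    # width and y coordinates (columns 1 and 3) by the image height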
| 90 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase_ ( __lowercase : int , __lowercase : Dict , __lowercase : str , __lowercase : Optional[Any] , __lowercase : str ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = TapasConfig.from_json_file(__lowercase )
# set absolute/relative position embeddings parameter
_UpperCAmelCase = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
_UpperCAmelCase = TapasForQuestionAnswering(config=__lowercase )
elif task == "WTQ":
# run_task_main.py hparams
_UpperCAmelCase = 4
_UpperCAmelCase = True
# hparam_utils.py hparams
_UpperCAmelCase = 0.66_4694
_UpperCAmelCase = 0.20_7951
_UpperCAmelCase = 0.12_1194
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = 0.035_2513
_UpperCAmelCase = TapasForQuestionAnswering(config=__lowercase )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
_UpperCAmelCase = 4
_UpperCAmelCase = False
# hparam_utils.py hparams
_UpperCAmelCase = 36.4519
_UpperCAmelCase = 0.90_3421
_UpperCAmelCase = 222.088
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = 0.76_3141
_UpperCAmelCase = TapasForQuestionAnswering(config=__lowercase )
elif task == "TABFACT":
_UpperCAmelCase = TapasForSequenceClassification(config=__lowercase )
elif task == "MLM":
_UpperCAmelCase = TapasForMaskedLM(config=__lowercase )
elif task == "INTERMEDIATE_PRETRAINING":
_UpperCAmelCase = TapasModel(config=__lowercase )
else:
raise ValueError(f'Task {task} not supported.' )
print(f'Building PyTorch model from configuration: {config}' )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(__lowercase , __lowercase , __lowercase )
# Save pytorch-model (weights and configuration)
print(f'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(__lowercase )
# Save tokenizer files
print(f'Save tokenizer files to {pytorch_dump_path}' )
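    # the vocab file is expected next to the TF checkpoint: strip the 10-character "model.ckpt" suffix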
_UpperCAmelCase = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" , model_max_length=512 )
tokenizer.save_pretrained(__lowercase )
print("Used relative position embeddings:" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE :List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.'''
)
parser.add_argument(
'''--reset_position_index_per_cell''',
default=False,
action='''store_true''',
        help='''Whether to use relative position embeddings or not. Defaults to False.''',
)
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--tapas_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained TAPAS model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__SCREAMING_SNAKE_CASE :List[str] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 22 | 0 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def _a ( _lowercase : NDArray[float64] , _lowercase : NDArray[float64] , _lowercase : list[int] , _lowercase : int , ):
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : int = coefficient_matrix.shape
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = constant_matrix.shape
if rowsa != colsa:
__UpperCAmelCase : List[Any] = F'Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'
raise ValueError(_lowercase )
if colsa != 1:
__UpperCAmelCase : Optional[Any] = F'Constant matrix must be nx1 but received {rowsa}x{colsa}'
raise ValueError(_lowercase )
if rowsa != rowsa:
__UpperCAmelCase : List[str] = (
'''Coefficient and constant matrices dimensions must be nxn and nx1 but '''
F'received {rowsa}x{colsa} and {rowsa}x{colsa}'
)
raise ValueError(_lowercase )
if len(_lowercase ) != rowsa:
__UpperCAmelCase : List[str] = (
'''Number of initial values must be equal to number of rows in coefficient '''
F'matrix but received {len(_lowercase )} and {rowsa}'
)
raise ValueError(_lowercase )
if iterations <= 0:
raise ValueError('''Iterations must be at least 1''' )
    __UpperCAmelCase : NDArray[float64] = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
__UpperCAmelCase , __UpperCAmelCase : Dict = table.shape
strictly_diagonally_dominant(_lowercase )
# Iterates the whole matrix for given number of times
for _ in range(_lowercase ):
__UpperCAmelCase : Union[str, Any] = []
for row in range(_lowercase ):
__UpperCAmelCase : int = 0
for col in range(_lowercase ):
if col == row:
__UpperCAmelCase : Optional[Any] = table[row][col]
elif col == cols - 1:
__UpperCAmelCase : Optional[Any] = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
__UpperCAmelCase : Optional[Any] = (temp + val) / denom
new_val.append(_lowercase )
__UpperCAmelCase : List[str] = new_val
return [float(_lowercase ) for i in new_val]
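# Example: with A = [[4, 1], [1, 3]], b = [1, 2], initial values [0, 0] and enough
# iterations, the iterates converge towards the exact solution [1/11, 7/11].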
def _a ( _lowercase : NDArray[float64] ):
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = table.shape
__UpperCAmelCase : Optional[Any] = True
for i in range(0 , _lowercase ):
__UpperCAmelCase : Tuple = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError('''Coefficient matrix is not strictly diagonally dominant''' )
return is_diagonally_dominant
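# e.g. coefficient rows [[4, 1, 1], [1, 5, 2], [1, 2, 4]] are strictly diagonally dominant,
# which guarantees convergence of the Jacobi iteration above.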
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod() | 240 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class a ( _a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : torch.FloatTensor
class a ( _a , _a ):
"""simple docstring"""
@register_to_config
def __init__( self : str , snake_case : int = 32 , snake_case : int = 64 , snake_case : int = 20 , snake_case : int = 768 , snake_case : Tuple=77 , snake_case : List[Any]=4 , snake_case : float = 0.0 , snake_case : str = "silu" , snake_case : Optional[str] = None , snake_case : Optional[str] = None , snake_case : Optional[str] = "linear" , snake_case : Optional[str] = "prd" , snake_case : Optional[int] = None , snake_case : Optional[int] = None , snake_case : Optional[int] = None , ) -> List[str]:
super().__init__()
__UpperCAmelCase : List[Any] = num_attention_heads
__UpperCAmelCase : List[str] = attention_head_dim
__UpperCAmelCase : int = num_attention_heads * attention_head_dim
__UpperCAmelCase : List[Any] = additional_embeddings
__UpperCAmelCase : Any = time_embed_dim or inner_dim
__UpperCAmelCase : Any = embedding_proj_dim or embedding_dim
__UpperCAmelCase : Union[str, Any] = clip_embed_dim or embedding_dim
__UpperCAmelCase : List[Any] = Timesteps(snake_case , snake_case , 0 )
__UpperCAmelCase : Optional[Any] = TimestepEmbedding(snake_case , snake_case , out_dim=snake_case , act_fn=snake_case )
__UpperCAmelCase : str = nn.Linear(snake_case , snake_case )
if embedding_proj_norm_type is None:
__UpperCAmelCase : str = None
elif embedding_proj_norm_type == "layer":
__UpperCAmelCase : str = nn.LayerNorm(snake_case )
else:
raise ValueError(f'unsupported embedding_proj_norm_type: {embedding_proj_norm_type}' )
__UpperCAmelCase : List[Any] = nn.Linear(snake_case , snake_case )
if encoder_hid_proj_type is None:
__UpperCAmelCase : Union[str, Any] = None
elif encoder_hid_proj_type == "linear":
__UpperCAmelCase : Any = nn.Linear(snake_case , snake_case )
else:
raise ValueError(f'unsupported encoder_hid_proj_type: {encoder_hid_proj_type}' )
__UpperCAmelCase : Dict = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , snake_case ) )
if added_emb_type == "prd":
__UpperCAmelCase : Any = nn.Parameter(torch.zeros(1 , 1 , snake_case ) )
elif added_emb_type is None:
__UpperCAmelCase : List[Any] = None
else:
raise ValueError(
f'`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.' )
__UpperCAmelCase : Optional[int] = nn.ModuleList(
[
BasicTransformerBlock(
snake_case , snake_case , snake_case , dropout=snake_case , activation_fn='''gelu''' , attention_bias=snake_case , )
for d in range(snake_case )
] )
if norm_in_type == "layer":
__UpperCAmelCase : Tuple = nn.LayerNorm(snake_case )
elif norm_in_type is None:
__UpperCAmelCase : List[Any] = None
else:
raise ValueError(f'Unsupported norm_in_type: {norm_in_type}.' )
__UpperCAmelCase : Dict = nn.LayerNorm(snake_case )
__UpperCAmelCase : Any = nn.Linear(snake_case , snake_case )
__UpperCAmelCase : Any = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10_000.0 )
causal_attention_mask.triu_(1 )
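        # filling the strict upper triangle with -10000 blocks attention to future positions
        # once this mask is added to the attention scores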
__UpperCAmelCase : str = causal_attention_mask[None, ...]
self.register_buffer('''causal_attention_mask''' , snake_case , persistent=snake_case )
__UpperCAmelCase : Tuple = nn.Parameter(torch.zeros(1 , snake_case ) )
__UpperCAmelCase : List[str] = nn.Parameter(torch.zeros(1 , snake_case ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowerCamelCase__ ( self : int ) -> Dict[str, AttentionProcessor]:
__UpperCAmelCase : Optional[Any] = {}
def fn_recursive_add_processors(snake_case : str , snake_case : torch.nn.Module , snake_case : Dict[str, AttentionProcessor] ):
if hasattr(snake_case , '''set_processor''' ):
__UpperCAmelCase : Union[str, Any] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'{name}.{sub_name}' , snake_case , snake_case )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(snake_case , snake_case , snake_case )
return processors
def lowerCamelCase__ ( self : Optional[Any] , snake_case : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> Union[str, Any]:
__UpperCAmelCase : Union[str, Any] = len(self.attn_processors.keys() )
if isinstance(snake_case , snake_case ) and len(snake_case ) != count:
raise ValueError(
f'A dict of processors was passed, but the number of processors {len(snake_case )} does not match the'
f' number of attention layers: {count}. Please make sure to pass {count} processor classes.' )
def fn_recursive_attn_processor(snake_case : str , snake_case : torch.nn.Module , snake_case : int ):
if hasattr(snake_case , '''set_processor''' ):
if not isinstance(snake_case , snake_case ):
module.set_processor(snake_case )
else:
module.set_processor(processor.pop(f'{name}.processor' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'{name}.{sub_name}' , snake_case , snake_case )
for name, module in self.named_children():
fn_recursive_attn_processor(snake_case , snake_case , snake_case )
def lowerCamelCase__ ( self : str ) -> Tuple:
self.set_attn_processor(AttnProcessor() )
def lowerCamelCase__ ( self : Optional[Any] , snake_case : List[Any] , snake_case : Union[torch.Tensor, float, int] , snake_case : torch.FloatTensor , snake_case : Optional[torch.FloatTensor] = None , snake_case : Optional[torch.BoolTensor] = None , snake_case : bool = True , ) -> List[Any]:
__UpperCAmelCase : Any = hidden_states.shape[0]
__UpperCAmelCase : Optional[int] = timestep
if not torch.is_tensor(snake_case ):
__UpperCAmelCase : str = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(snake_case ) and len(timesteps.shape ) == 0:
__UpperCAmelCase : Any = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__UpperCAmelCase : Optional[int] = timesteps * torch.ones(snake_case , dtype=timesteps.dtype , device=timesteps.device )
__UpperCAmelCase : Tuple = self.time_proj(snake_case )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
__UpperCAmelCase : int = timesteps_projected.to(dtype=self.dtype )
__UpperCAmelCase : Optional[int] = self.time_embedding(snake_case )
if self.embedding_proj_norm is not None:
__UpperCAmelCase : Optional[Any] = self.embedding_proj_norm(snake_case )
__UpperCAmelCase : str = self.embedding_proj(snake_case )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
__UpperCAmelCase : Dict = self.encoder_hidden_states_proj(snake_case )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('''`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set''' )
__UpperCAmelCase : Optional[int] = self.proj_in(snake_case )
__UpperCAmelCase : Optional[Any] = self.positional_embedding.to(hidden_states.dtype )
__UpperCAmelCase : Union[str, Any] = []
__UpperCAmelCase : Optional[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(snake_case )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
__UpperCAmelCase : Optional[int] = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
__UpperCAmelCase : Union[str, Any] = hidden_states[:, None, :]
__UpperCAmelCase : Union[str, Any] = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
__UpperCAmelCase : Any = self.prd_embedding.to(hidden_states.dtype ).expand(snake_case , -1 , -1 )
additional_embeds.append(snake_case )
__UpperCAmelCase : Dict = torch.cat(
snake_case , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
__UpperCAmelCase : str = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
__UpperCAmelCase : Union[str, Any] = F.pad(
snake_case , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
__UpperCAmelCase : Optional[int] = hidden_states + positional_embeddings
if attention_mask is not None:
__UpperCAmelCase : List[Any] = (1 - attention_mask.to(hidden_states.dtype )) * -10_000.0
__UpperCAmelCase : str = F.pad(snake_case , (0, self.additional_embeddings) , value=0.0 )
__UpperCAmelCase : Optional[int] = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
__UpperCAmelCase : Optional[int] = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
__UpperCAmelCase : str = self.norm_in(snake_case )
for block in self.transformer_blocks:
__UpperCAmelCase : Optional[int] = block(snake_case , attention_mask=snake_case )
__UpperCAmelCase : int = self.norm_out(snake_case )
if self.prd_embedding is not None:
__UpperCAmelCase : Optional[int] = hidden_states[:, -1]
else:
__UpperCAmelCase : List[Any] = hidden_states[:, additional_embeddings_len:]
__UpperCAmelCase : Dict = self.proj_to_clip_embeddings(snake_case )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=snake_case )
def lowerCamelCase__ ( self : Optional[int] , snake_case : List[str] ) -> str:
__UpperCAmelCase : Dict = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
| 240 | 1 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class _lowerCamelCase:
def __init__( self, lowerCamelCase) -> Tuple:
"""simple docstring"""
_lowercase : Optional[Any] = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
_lowercase : List[str] = len(lowerCamelCase) - 1
def UpperCamelCase ( self, lowerCamelCase) -> list[float]:
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
_lowercase : list[float] = []
for i in range(len(self.list_of_points)):
# basis function for each i
output_values.append(
comb(self.degree, lowerCamelCase) * ((1 - t) ** (self.degree - i)) * (t**i))
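            # i.e. the Bernstein polynomial B_{i,n}(t) = C(n, i) * (1 - t)**(n - i) * t**i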
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(lowerCamelCase), 5) == 1
return output_values
def UpperCamelCase ( self, lowerCamelCase) -> tuple[float, float]:
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
_lowercase : Optional[int] = self.basis_function(lowerCamelCase)
_lowercase : Optional[int] = 0.0
_lowercase : Optional[Any] = 0.0
for i in range(len(self.list_of_points)):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def UpperCamelCase ( self, lowerCamelCase = 0.0_1) -> int:
"""simple docstring"""
from matplotlib import pyplot as plt # type: ignore
_lowercase : list[float] = [] # x coordinates of points to plot
_lowercase : list[float] = [] # y coordinates of points to plot
_lowercase : str = 0.0
while t <= 1:
_lowercase : List[Any] = self.bezier_curve_function(lowerCamelCase)
to_plot_x.append(value[0])
to_plot_y.append(value[1])
t += step_size
_lowercase : List[str] = [i[0] for i in self.list_of_points]
_lowercase : Dict = [i[1] for i in self.list_of_points]
plt.plot(
lowerCamelCase, lowerCamelCase, color='blue', label='Curve of Degree ' + str(self.degree), )
plt.scatter(lowerCamelCase, lowerCamelCase, color='red', label='Control Points')
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 21 |
from __future__ import annotations
from math import ceil, floor, sqrt
def UpperCamelCase_( lowerCamelCase_ = 200_0000 ) -> int:
_lowercase : list[int] = [0]
_lowercase : int
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
_lowercase : int = 0
# the area corresponding to the grid that gives the product closest to target
_lowercase : int = 0
# an estimate of b, using the quadratic formula
_lowercase : float
# the largest integer less than b_estimate
_lowercase : int
    # the smallest integer greater than b_estimate
_lowercase : int
# the triangle number corresponding to b_floor
_lowercase : int
# the triangle number corresponding to b_ceil
_lowercase : int
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
_lowercase : Optional[int] = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
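        # solving triangle_a * b * (b + 1) / 2 = target for b gives this estimate;
        # the best integer b is then its floor or its ceiling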
_lowercase : List[str] = floor(lowerCamelCase_ )
_lowercase : Dict = ceil(lowerCamelCase_ )
_lowercase : List[str] = triangle_numbers[b_floor]
_lowercase : List[str] = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
_lowercase : Union[str, Any] = triangle_b_first_guess * triangle_a
_lowercase : Union[str, Any] = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
_lowercase : Any = triangle_b_second_guess * triangle_a
_lowercase : Optional[Any] = idx_a * b_ceil
return area
if __name__ == "__main__":
print(F"{solution() = }")
| 21 | 1 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = "x" , UpperCAmelCase = 10**-10 , UpperCAmelCase = 1 , ) -> complex:
snake_case_ = symbols(UpperCAmelCase )
snake_case_ = lambdify(UpperCAmelCase , UpperCAmelCase )
snake_case_ = lambdify(UpperCAmelCase , diff(UpperCAmelCase , UpperCAmelCase ) )
snake_case_ = starting_point
while True:
if diff_function(UpperCAmelCase ) != 0:
snake_case_ = prev_guess - multiplicity * func(UpperCAmelCase ) / diff_function(
UpperCAmelCase )
else:
raise ZeroDivisionError('Could not find root' ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
snake_case_ = next_guess
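# The loop above implements the modified Newton-Raphson update
# x_{n+1} = x_n - multiplicity * f(x_n) / f'(x_n); multiplicity > 1 restores fast
# convergence at repeated roots.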
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}""")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
F"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
F"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 353 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCamelCase ( metaclass=lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = ["keras_nlp"]
def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> int:
requires_backends(self, ['keras_nlp'])
| 312 | 0 |
"""simple docstring"""
import cmath
import math
def _snake_case ( lowerCamelCase__ : float , lowerCamelCase__ : float , lowerCamelCase__ : float , lowerCamelCase__ : float ) -> complex:
lowerCamelCase_ : int =math.radians(lowerCamelCase__ )
lowerCamelCase_ : List[str] =math.radians(lowerCamelCase__ )
# Convert voltage and current to rectangular form
lowerCamelCase_ : Tuple =cmath.rect(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ : Dict =cmath.rect(lowerCamelCase__ , lowerCamelCase__ )
# Calculate apparent power
return voltage_rect * current_rect
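# e.g. voltage 100 at 0 degrees with current 5 at 0 degrees gives (500+0j), while shifting
# the voltage to 90 degrees gives approximately 500j (a purely reactive load).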
if __name__ == "__main__":
import doctest
doctest.testmod()
| 144 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase__ ( snake_case__, unittest.TestCase ):
_UpperCAmelCase :Union[str, Any] = KandinskyVaaControlnetImgaImgPipeline
_UpperCAmelCase :List[Any] = ["image_embeds", "negative_image_embeds", "image", "hint"]
_UpperCAmelCase :List[str] = ["image_embeds", "negative_image_embeds", "image", "hint"]
_UpperCAmelCase :Dict = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_UpperCAmelCase :str = False
@property
def UpperCAmelCase__ ( self : Tuple ):
return 32
@property
def UpperCAmelCase__ ( self : List[Any] ):
return 32
@property
def UpperCAmelCase__ ( self : Dict ):
return self.time_input_dim
@property
def UpperCAmelCase__ ( self : int ):
return self.time_input_dim * 4
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return 100
@property
def UpperCAmelCase__ ( self : int ):
torch.manual_seed(0 )
lowerCamelCase_ : Union[str, Any] ={
"in_channels": 8,
            # Out channels is double the in channels because the model predicts both mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
lowerCamelCase_ : Union[str, Any] =UNetaDConditionModel(**snake_case__ )
return model
@property
def UpperCAmelCase__ ( self : Any ):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def UpperCAmelCase__ ( self : int ):
torch.manual_seed(0 )
lowerCamelCase_ : int =VQModel(**self.dummy_movq_kwargs )
return model
def UpperCAmelCase__ ( self : Dict ):
lowerCamelCase_ : Optional[int] =self.dummy_unet
lowerCamelCase_ : Optional[Any] =self.dummy_movq
lowerCamelCase_ : Optional[Any] ={
"num_train_timesteps": 1000,
"beta_schedule": "linear",
"beta_start": 0.00_085,
"beta_end": 0.012,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
lowerCamelCase_ : Optional[Any] =DDIMScheduler(**snake_case__ )
lowerCamelCase_ : Optional[Any] ={
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def UpperCAmelCase__ ( self : Union[str, Any] , snake_case__ : str , snake_case__ : str=0 ):
lowerCamelCase_ : Optional[int] =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
lowerCamelCase_ : Optional[Any] =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
snake_case__ )
# create init_image
lowerCamelCase_ : List[Any] =floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
lowerCamelCase_ : Union[str, Any] =image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCamelCase_ : Tuple =Image.fromarray(np.uint8(snake_case__ ) ).convert("RGB" ).resize((256, 256) )
# create hint
lowerCamelCase_ : Dict =floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
if str(snake_case__ ).startswith("mps" ):
lowerCamelCase_ : List[Any] =torch.manual_seed(snake_case__ )
else:
lowerCamelCase_ : List[str] =torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
lowerCamelCase_ : Dict ={
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def UpperCAmelCase__ ( self : Optional[Any] ):
lowerCamelCase_ : Any ="cpu"
lowerCamelCase_ : Dict =self.get_dummy_components()
lowerCamelCase_ : Dict =self.pipeline_class(**snake_case__ )
lowerCamelCase_ : str =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCamelCase_ : Optional[Any] =pipe(**self.get_dummy_inputs(snake_case__ ) )
lowerCamelCase_ : Dict =output.images
lowerCamelCase_ : Dict =pipe(
**self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0]
lowerCamelCase_ : List[str] =image[0, -3:, -3:, -1]
lowerCamelCase_ : Optional[int] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase_ : Union[str, Any] =np.array(
[0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : int ):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy" )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        init_image = init_image.resize((512, 512) )
        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png" )
        hint = torch.from_numpy(np.array(hint ) ).float() / 255.0
        hint = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
        prompt = "A robot, 4k photo"
        pipe_prior = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        image_embeds , negative_image_embeds = pipe_prior(
            prompt , image=init_image , strength=0.85 , generator=generator , negative_prompt="" , ).to_tuple()
        output = pipeline(
            image=init_image , image_embeds=image_embeds , negative_image_embeds=negative_image_embeds , hint=hint , generator=generator , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="np" , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image , expected_image )
| 144 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class SCREAMING_SNAKE_CASE__ ( PipelineTool ):
lowerCAmelCase_ = """facebook/bart-large-mnli"""
lowerCAmelCase_ = (
"""This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which """
"""should be the text to classify, and `labels`, which should be the list of labels to use for classification. """
"""It returns the most likely label in the list of provided `labels` for the input text."""
)
lowerCAmelCase_ = """text_classifier"""
lowerCAmelCase_ = AutoTokenizer
lowerCAmelCase_ = AutoModelForSequenceClassification
lowerCAmelCase_ = ["""text""", ["""text"""]]
lowerCAmelCase_ = ["""text"""]
def UpperCAmelCase_ ( self )-> str:
'''simple docstring'''
super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith('entail' ):
                self.entailment_id = int(idx )
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
    def UpperCAmelCase_ ( self , text , labels )-> Any:
        '''simple docstring'''
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels ) , [F'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , )
    def UpperCAmelCase_ ( self , outputs )-> Union[str, Any]:
        '''simple docstring'''
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
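# Illustrative usage sketch (an assumption, not part of the upstream file): the
# tool scores `text` against the hypothesis "This example is {label}" for every
# candidate label and returns the label with the highest entailment logit
# (column 2 of the MNLI head):
#
#   tool = SCREAMING_SNAKE_CASE__()                               # hypothetical setup
#   tool("This movie was fantastic!", ["positive", "negative"])  # expected: "positive"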
| 251 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 251 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : str = logging.get_logger(__name__)
lowerCAmelCase_ : Tuple = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class __SCREAMING_SNAKE_CASE (PretrainedConfig ):
"""simple docstring"""
__a ='speech_to_text'
__a =['past_key_values']
__a ={'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self : Optional[Any] , vocab_size=1_00_00 , encoder_layers=12 , encoder_ffn_dim=20_48 , encoder_attention_heads=4 , decoder_layers=6 , decoder_ffn_dim=20_48 , decoder_attention_heads=4 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="relu" , d_model=2_56 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_source_positions=60_00 , max_target_positions=10_24 , num_conv_layers=2 , conv_kernel_sizes=(5, 5) , conv_channels=10_24 , input_feat_per_channel=80 , input_channels=1 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes )
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
f'but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '
f'`config.num_conv_layers = {self.num_conv_layers}`.' )
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , **kwargs , )
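# Minimal illustrative sketch (an assumption about usage): the constructor above
# validates that `len(conv_kernel_sizes)` equals `num_conv_layers`, so the
# defaults (`conv_kernel_sizes=(5, 5)`, `num_conv_layers=2`) build cleanly while
# a mismatched pair raises the ValueError shown above:
#
#   config = __SCREAMING_SNAKE_CASE()          # two conv layers, kernel size 5 each
#   __SCREAMING_SNAKE_CASE(num_conv_layers=3)  # would raise: 2 kernel sizes != 3 layers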
| 63 | """simple docstring"""
import functools


def a_ ( worda , wordb ):
    len_worda = len(worda )
    len_wordb = len(wordb )

    @functools.cache
    def min_distance(indexa , indexb ) -> int:
        # if the first word index overflows - delete all remaining from the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if the second word index overflows - delete all remaining from the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb] )  # current letters not identical
        return min(
            1 + min_distance(indexa + 1 , indexb ) , 1 + min_distance(indexa , indexb + 1 ) , diff + min_distance(indexa + 1 , indexb + 1 ) , )

    return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
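# Quick sanity check (illustrative; it follows from the recursion above): the
# Levenshtein distance between "kitten" and "sitting" is 3
# (substitute k->s, substitute e->i, insert a trailing g).
if __name__ == "__main__":
    assert a_("kitten" , "sitting") == 3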
| 98 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""yjernite/retribert-base-uncased""": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"""yjernite/retribert-base-uncased""": {"""do_lower_case""": True},
}
class UpperCAmelCase ( PreTrainedTokenizerFast ):
A__ : str = VOCAB_FILES_NAMES
A__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
A__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : List[str] = PRETRAINED_INIT_CONFIGURATION
A__ : Tuple = RetriBertTokenizer
A__ : Tuple = ["input_ids", "attention_mask"]
def __init__(self : List[Any] , snake_case__ : List[str]=None , snake_case__ : Optional[int]=None , snake_case__ : str=True , snake_case__ : Dict="[UNK]" , snake_case__ : List[Any]="[SEP]" , snake_case__ : Optional[int]="[PAD]" , snake_case__ : List[Any]="[CLS]" , snake_case__ : List[str]="[MASK]" , snake_case__ : str=True , snake_case__ : Union[str, Any]=None , **snake_case__ : Optional[int] , ) -> List[Any]:
'''simple docstring'''
super().__init__(
snake_case__ , tokenizer_file=snake_case__ , do_lower_case=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , tokenize_chinese_chars=snake_case__ , strip_accents=snake_case__ , **snake_case__ , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Optional[int] , snake_case__ : List[Any]=None ) -> List[str]:
'''simple docstring'''
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
    def _SCREAMING_SNAKE_CASE (self : Optional[int] , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 10 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester :
def __init__(self : Dict , snake_case__ : Any , snake_case__ : Tuple=99 , snake_case__ : Tuple=13 , snake_case__ : int=16 , snake_case__ : Tuple=7 , snake_case__ : Union[str, Any]=True , snake_case__ : int=True , snake_case__ : List[Any]=True , snake_case__ : Optional[Any]=False , snake_case__ : Optional[int]=True , snake_case__ : Any=2 , snake_case__ : List[Any]=32 , snake_case__ : List[str]=4 , snake_case__ : List[str]=4 , snake_case__ : int=30 , snake_case__ : int=0 , snake_case__ : Tuple=1 , snake_case__ : Optional[Any]=2 , snake_case__ : int=None , ) -> List[Any]:
'''simple docstring'''
snake_case : Optional[Any] = parent
snake_case : Any = batch_size
snake_case : Any = decoder_seq_length
# For common tests
snake_case : Any = self.decoder_seq_length
snake_case : Optional[int] = is_training
snake_case : List[str] = use_attention_mask
snake_case : Tuple = use_labels
snake_case : int = vocab_size
snake_case : Any = d_model
snake_case : Dict = d_model
snake_case : List[str] = decoder_layers
snake_case : Union[str, Any] = decoder_layers
snake_case : int = decoder_ffn_dim
snake_case : List[Any] = decoder_attention_heads
snake_case : Dict = decoder_attention_heads
snake_case : Optional[int] = eos_token_id
snake_case : Dict = bos_token_id
snake_case : List[str] = pad_token_id
snake_case : int = decoder_start_token_id
snake_case : List[Any] = use_cache
snake_case : List[str] = max_position_embeddings
snake_case : Dict = None
snake_case : Union[str, Any] = decoder_seq_length
snake_case : Union[str, Any] = 2
snake_case : Union[str, Any] = 1
    def prepare_config_and_inputs (self : List[str] ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        config = TrOCRConfig(
            vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past (self : Union[str, Any] , config , input_ids , attention_mask , lm_labels , ):
        '''simple docstring'''
        config.use_cache = True
        model = TrOCRDecoder(config=config ).to(torch_device ).eval()
        input_ids = input_ids[:2]
        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids , use_cache=True )
        outputs_use_cache_conf = model(input_ids )
        outputs_no_past = model(input_ids , use_cache=False )
        self.parent.assertTrue(len(outputs ) == len(outputs_use_cache_conf ) )
        self.parent.assertTrue(len(outputs ) == len(outputs_no_past ) + 1 )
        past_key_values = outputs["past_key_values"]
        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        output_from_no_past = model(next_input_ids )["last_hidden_state"]
        output_from_past = model(next_tokens , past_key_values=past_key_values )["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3 )
    def prepare_config_and_inputs_for_common (self : List[str] ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , attention_mask , lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
A__ : int = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
A__ : Union[str, Any] = (TrOCRForCausalLM,) if is_torch_available() else ()
A__ : int = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
A__ : int = True
A__ : Optional[Any] = False
def _SCREAMING_SNAKE_CASE (self : Any ) -> Optional[Any]:
'''simple docstring'''
        self.model_tester = TrOCRStandaloneDecoderModelTester(self , is_training=False )
        self.config_tester = ConfigTester(self , config_class=TrOCRConfig )
def _SCREAMING_SNAKE_CASE (self : int ) -> Union[str, Any]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Optional[Any]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE (self : Dict ) -> List[str]:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Any:
'''simple docstring'''
return
@unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
def _SCREAMING_SNAKE_CASE (self : Any ) -> Any:
'''simple docstring'''
pass
| 10 | 1 |
from collections import deque
from math import floor
from random import random
from time import time
class snake_case_ :
def __init__( self :Dict ) -> List[Any]:
a__ = {}
def lowerCamelCase__( self :List[str] ,__snake_case :int ,__snake_case :List[Any] ,__snake_case :int=1 ) -> Optional[int]:
if self.graph.get(__snake_case ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
a__ = [[w, v]]
if not self.graph.get(__snake_case ):
a__ = []
def lowerCamelCase__( self :str ) -> List[Any]:
return list(self.graph )
def lowerCamelCase__( self :List[Any] ,__snake_case :List[Any] ,__snake_case :Optional[Any] ) -> Any:
if self.graph.get(__snake_case ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__snake_case )
def lowerCamelCase__( self :Any ,__snake_case :Any=-2 ,__snake_case :Optional[int]=-1 ) -> Optional[Any]:
if s == d:
return []
a__ = []
a__ = []
if s == -2:
a__ = list(self.graph )[0]
stack.append(__snake_case )
visited.append(__snake_case )
a__ = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
a__ = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__snake_case )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
a__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__snake_case ) != 0:
a__ = stack[len(__snake_case ) - 1]
else:
a__ = ss
            # check if we have reached the starting point
if len(__snake_case ) == 0:
return visited
def lowerCamelCase__( self :Dict ,__snake_case :Any=-1 ) -> List[str]:
if c == -1:
a__ = floor(random() * 1_00_00 ) + 10
for i in range(__snake_case ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_02 ) + 1 ):
a__ = floor(random() * c ) + 1
if n != i:
self.add_pair(__snake_case ,__snake_case ,1 )
def lowerCamelCase__( self :Optional[Any] ,__snake_case :Optional[int]=-2 ) -> Optional[Any]:
a__ = deque()
a__ = []
if s == -2:
a__ = list(self.graph )[0]
d.append(__snake_case )
visited.append(__snake_case )
while d:
a__ = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def lowerCamelCase__( self :Optional[Any] ,__snake_case :Optional[Any] ) -> List[str]:
a__ = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def lowerCamelCase__( self :int ,__snake_case :List[str] ) -> int:
return len(self.graph[u] )
def lowerCamelCase__( self :List[str] ,__snake_case :Any=-2 ) -> Union[str, Any]:
a__ = []
a__ = []
if s == -2:
a__ = list(self.graph )[0]
stack.append(__snake_case )
visited.append(__snake_case )
a__ = s
a__ = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
a__ = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
a__ = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(__snake_case ) != 0:
a__ = stack[len(__snake_case ) - 1]
else:
a__ = ss
            # check if we have reached the starting point
if len(__snake_case ) == 0:
return sorted_nodes
def lowerCamelCase__( self :Any ) -> Dict:
a__ = []
a__ = []
a__ = list(self.graph )[0]
stack.append(__snake_case )
visited.append(__snake_case )
a__ = -2
a__ = []
a__ = s
a__ = False
a__ = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
a__ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
a__ = len(__snake_case ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
a__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
a__ = True
if len(__snake_case ) != 0:
a__ = stack[len(__snake_case ) - 1]
else:
a__ = False
indirect_parents.append(__snake_case )
a__ = s
a__ = ss
            # check if we have reached the starting point
if len(__snake_case ) == 0:
return list(__snake_case )
def lowerCamelCase__( self :Optional[int] ) -> str:
a__ = []
a__ = []
a__ = list(self.graph )[0]
stack.append(__snake_case )
visited.append(__snake_case )
a__ = -2
a__ = []
a__ = s
a__ = False
a__ = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
a__ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
a__ = len(__snake_case ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
a__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
a__ = True
if len(__snake_case ) != 0:
a__ = stack[len(__snake_case ) - 1]
else:
a__ = False
indirect_parents.append(__snake_case )
a__ = s
a__ = ss
            # check if we have reached the starting point
if len(__snake_case ) == 0:
return False
def lowerCamelCase__( self :Any ,__snake_case :Tuple=-2 ,__snake_case :Optional[int]=-1 ) -> str:
a__ = time()
self.dfs(__snake_case ,__snake_case )
a__ = time()
return end - begin
def lowerCamelCase__( self :Dict ,__snake_case :Optional[int]=-2 ) -> Optional[Any]:
a__ = time()
self.bfs(__snake_case )
a__ = time()
return end - begin
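# Note: the class below reuses the exact name of the directed-graph class above,
# so within this module the second (undirected) definition shadows the first;
# they read as two originally distinct classes, one directed and one undirected.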
class snake_case_ :
def __init__( self :List[Any] ) -> Dict:
a__ = {}
def lowerCamelCase__( self :Optional[int] ,__snake_case :Dict ,__snake_case :int ,__snake_case :Tuple=1 ) -> Optional[Any]:
# check if the u exists
if self.graph.get(__snake_case ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
a__ = [[w, v]]
# add the other way
if self.graph.get(__snake_case ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
a__ = [[w, u]]
def lowerCamelCase__( self :int ,__snake_case :Union[str, Any] ,__snake_case :Optional[int] ) -> int:
if self.graph.get(__snake_case ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__snake_case )
# the other way round
if self.graph.get(__snake_case ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(__snake_case )
def lowerCamelCase__( self :Optional[Any] ,__snake_case :int=-2 ,__snake_case :Any=-1 ) -> Optional[Any]:
if s == d:
return []
a__ = []
a__ = []
if s == -2:
a__ = list(self.graph )[0]
stack.append(__snake_case )
visited.append(__snake_case )
a__ = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
a__ = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__snake_case )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
a__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__snake_case ) != 0:
a__ = stack[len(__snake_case ) - 1]
else:
a__ = ss
            # check if we have reached the starting point
if len(__snake_case ) == 0:
return visited
def lowerCamelCase__( self :int ,__snake_case :Any=-1 ) -> Optional[int]:
if c == -1:
a__ = floor(random() * 1_00_00 ) + 10
for i in range(__snake_case ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_02 ) + 1 ):
a__ = floor(random() * c ) + 1
if n != i:
self.add_pair(__snake_case ,__snake_case ,1 )
def lowerCamelCase__( self :Any ,__snake_case :Optional[Any]=-2 ) -> Optional[int]:
a__ = deque()
a__ = []
if s == -2:
a__ = list(self.graph )[0]
d.append(__snake_case )
visited.append(__snake_case )
while d:
a__ = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def lowerCamelCase__( self :Tuple ,__snake_case :Optional[Any] ) -> List[str]:
return len(self.graph[u] )
def lowerCamelCase__( self :str ) -> Optional[int]:
a__ = []
a__ = []
a__ = list(self.graph )[0]
stack.append(__snake_case )
visited.append(__snake_case )
a__ = -2
a__ = []
a__ = s
a__ = False
a__ = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
a__ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
a__ = len(__snake_case ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
a__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
a__ = True
if len(__snake_case ) != 0:
a__ = stack[len(__snake_case ) - 1]
else:
a__ = False
indirect_parents.append(__snake_case )
a__ = s
a__ = ss
            # check if we have reached the starting point
if len(__snake_case ) == 0:
return list(__snake_case )
def lowerCamelCase__( self :Optional[Any] ) -> Tuple:
a__ = []
a__ = []
a__ = list(self.graph )[0]
stack.append(__snake_case )
visited.append(__snake_case )
a__ = -2
a__ = []
a__ = s
a__ = False
a__ = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
a__ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
a__ = len(__snake_case ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
a__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
a__ = True
if len(__snake_case ) != 0:
a__ = stack[len(__snake_case ) - 1]
else:
a__ = False
indirect_parents.append(__snake_case )
a__ = s
a__ = ss
            # check if we have reached the starting point
if len(__snake_case ) == 0:
return False
def lowerCamelCase__( self :Optional[Any] ) -> Union[str, Any]:
return list(self.graph )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :str=-2 ,__snake_case :List[str]=-1 ) -> List[str]:
a__ = time()
self.dfs(__snake_case ,__snake_case )
a__ = time()
return end - begin
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :int=-2 ) -> str:
a__ = time()
self.bfs(__snake_case )
a__ = time()
return end - begin
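# Illustrative usage sketch, kept in comments because the method names in this
# dump all collide; the intended public API is still visible in the internal
# calls above (`self.add_pair`, `self.dfs`, `self.bfs`):
#
#   g = Graph()        # hypothetical name for the undirected class above
#   g.add_pair(1, 2)
#   g.add_pair(2, 3)
#   g.bfs(1)           # -> [1, 2, 3], breadth-first order from node 1
#   g.dfs(1, 3)        # -> [1, 2, 3], depth-first path from 1 to 3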
| 240 |
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch( gpta_checkpoint_path : str , gpta_config_file : str , pytorch_dump_folder_path : str ):
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file )
    model = GPTaModel(config )
    # Load weights from numpy
    load_tf_weights_in_gpta(model , config , gpta_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F'Save configuration file to {pytorch_config_dump_path}' )
    with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--gpt2_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
    args = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
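# Example invocation (illustrative; the script name and paths are hypothetical
# placeholders, only the flags defined above are real):
#
#   python convert_gpt2_checkpoint.py \
#       --gpt2_checkpoint_path /path/to/tf_ckpt \
#       --pytorch_dump_folder_path /path/to/out \
#       --gpt2_config_file /path/to/config.json   # optional; omitted -> default config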
| 240 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"distilbert-base-uncased": 5_12,
"distilbert-base-uncased-distilled-squad": 5_12,
"distilbert-base-cased": 5_12,
"distilbert-base-cased-distilled-squad": 5_12,
"distilbert-base-german-cased": 5_12,
"distilbert-base-multilingual-cased": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class SCREAMING_SNAKE_CASE__ ( PreTrainedTokenizerFast ):
A : int = VOCAB_FILES_NAMES
A : Dict = PRETRAINED_VOCAB_FILES_MAP
A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
A : List[Any] = ["input_ids", "attention_mask"]
A : Optional[int] = DistilBertTokenizer
def __init__( self : Optional[int] , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : str=None , _lowerCAmelCase : Any=True , _lowerCAmelCase : Optional[Any]="[UNK]" , _lowerCAmelCase : Tuple="[SEP]" , _lowerCAmelCase : Any="[PAD]" , _lowerCAmelCase : int="[CLS]" , _lowerCAmelCase : Dict="[MASK]" , _lowerCAmelCase : str=True , _lowerCAmelCase : str=None , **_lowerCAmelCase : Dict , ):
super().__init__(
_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , tokenize_chinese_chars=_lowerCAmelCase , strip_accents=_lowerCAmelCase , **_lowerCAmelCase , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
def snake_case__ ( self : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple=None ):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case__ ( self : Tuple , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
    def snake_case__ ( self : List[Any] , save_directory : str , filename_prefix : Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
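# Worked example (illustrative) of the token-type layout produced above: for a
# pair of sequences A (3 tokens) and B (2 tokens) the ids line up as
#   [CLS] A A A [SEP]  B B [SEP]
#    0    0 0 0  0     1 1  1
# i.e. len(cls + A + sep) zeros followed by len(B + sep) ones.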
| 20 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
RESOURCE_FILES_NAMES = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"ernie-m-base": 5_14,
"ernie-m-large": 5_14,
}
PRETRAINED_INIT_CONFIGURATION = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class SCREAMING_SNAKE_CASE__ ( PreTrainedTokenizer ):
A : List[str] = ["input_ids"]
A : Tuple = VOCAB_FILES_NAMES
A : List[Any] = PRETRAINED_INIT_CONFIGURATION
A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
A : Optional[Any] = RESOURCE_FILES_NAMES
    def __init__( self : int , sentencepiece_model_ckpt , vocab_file=None , do_lower_case=False , encoding="utf8" , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs : Any , ):
        # Mask token behaves like a normal word, i.e. includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , vocab_file=vocab_file , encoding=encoding , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(sentencepiece_model_ckpt )
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file )
        else:
            self.vocab = {self.sp_model.id_to_piece(id ): id for id in range(self.sp_model.get_piece_size() )}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def snake_case__ ( self : List[Any] , text : List[Any] ):
        if text is None:
            return None
        split_tokens = self.tokenize(text )
        normalized_text , char_mapping = """""", []
        for i, ch in enumerate(text ):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch )
            else:
                ch = unicodedata.normalize("""NFKC""" , ch )
            if self.is_whitespace(ch ):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch ) )
        text , token_mapping , offset = normalized_text, [], 0
        if self.do_lower_case:
            text = text.lower()
        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token ) + offset
            end = start + len(token )
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
            offset = end
return token_mapping
@property
def snake_case__ ( self : Tuple ):
return len(self.vocab )
def snake_case__ ( self : Dict ):
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : int ):
        state = self.__dict__.copy()
        state["""sp_model"""] = None
return state
    def __setstate__( self : Optional[Any] , d : List[str] ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
    def snake_case__ ( self : str , text : Optional[int] ):
        return "".join((self.SP_CHAR_MAPPING.get(c , c ) for c in text) )
    def snake_case__ ( self : Optional[Any] , text : str , enable_sampling=False , nbest_size=64 , alpha=0.1 ):
        if self.sp_model_kwargs.get("""enable_sampling""" ) is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("""alpha""" ) is not None:
            alpha = self.sp_model_kwargs.get("""alpha""" )
        if self.sp_model_kwargs.get("""nbest_size""" ) is not None:
            nbest_size = self.sp_model_kwargs.get("""nbest_size""" )
        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text )
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text , nbest_size , alpha )
        new_pieces = []
        for pi, piece in enumerate(pieces ):
if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE ) and pi != 0:
                    new_pieces.append(piece )
continue
else:
continue
            lst_i = 0
            for i, chunk in enumerate(piece ):
if chunk == SPIECE_UNDERLINE:
continue
                if self.is_ch_char(chunk ) or self.is_punct(chunk ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
                    new_pieces.append(chunk )
                    lst_i = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
                    lst_i = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
                    lst_i = i
            if len(piece ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
    def snake_case__ ( self : Union[str, Any] , tokens : int ):
        out_string = """""".join(tokens ).replace(SPIECE_UNDERLINE , """ """ ).strip()
return out_string
    def snake_case__ ( self : Dict , ids : Tuple ):
        tokens = self.convert_ids_to_tokens(ids )
        out_string = """""".join(tokens ).replace(SPIECE_UNDERLINE , """ """ ).strip()
return out_string
def snake_case__ ( self : Dict , _lowerCAmelCase : Tuple ):
return self.vocab.get(_lowerCAmelCase , self.vocab.get(self.unk_token ) )
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : Dict ):
return self.reverse_vocab.get(_lowerCAmelCase , self.unk_token )
def snake_case__ ( self : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any]=None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def snake_case__ ( self : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any]=None ):
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Optional[Any]=False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1]
return [1] + ([0] * len(_lowerCAmelCase )) + [1]
def snake_case__ ( self : Optional[int] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(_lowerCAmelCase ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(_lowerCAmelCase ) + 1) + [1] * (len(_lowerCAmelCase ) + 3)
    def snake_case__ ( self : Tuple , char : List[str] ):
if "\u4e00" <= char <= "\u9fff":
return True
return False
    def snake_case__ ( self : List[str] , char : Any ):
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
    def snake_case__ ( self : int , char : List[Any] ):
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
    def snake_case__ ( self : str , char : Optional[Any] ):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char ) == 1:
            cat = unicodedata.category(char )
if cat == "Zs":
return True
return False
    def load_vocab( self : str , filepath : List[Any] ):
        token_to_idx = {}
        with io.open(filepath , """r""" , encoding="""utf-8""" ) as f:
            for index, line in enumerate(f ):
                token = line.rstrip("""\n""" )
                token_to_idx[token] = int(index )
return token_to_idx
    def snake_case__ ( self : Union[str, Any] , save_directory : str , filename_prefix : Optional[str] = None ):
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        else:
            vocab_file = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
        with open(vocab_file , """w""" , encoding="""utf-8""" ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
                        """ Please check that the vocabulary is not corrupted!""" )
                    index = token_index
                writer.write(token + """\n""" )
                index += 1
        tokenizer_model_file = os.path.join(save_directory , """sentencepiece.bpe.model""" )
        with open(tokenizer_model_file , """wb""" ) as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model )
return (vocab_file,)
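# Worked example (illustrative) of the special-token layout used above: a single
# sequence becomes [CLS] X [SEP]; a pair becomes [CLS] A [SEP] [SEP] B [SEP],
# which is why the token-type ids are (len(A) + 2) zeros for a single sequence
# and [0] * (len(A) + 1) + [1] * (len(B) + 3) for a pair.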
| 20 | 1 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset( IterableDataset ):
    def __init__( self : int , tokenizer , dataset , seq_length=1_0_2_4 , num_of_sequences=1_0_2_4 , chars_per_token=3.6 ):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences
    def __iter__( self : Dict ):
        iterator = iter(self.dataset )
        more_examples = True
        while more_examples:
            buffer , buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator )['content'] )
                    buffer_len += len(buffer[-1] )
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer , truncation=False )['input_ids']
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id] )
            for i in range(0 , len(all_token_ids ) , self.seq_length ):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids ) == self.seq_length:
                    yield torch.tensor(input_ids )
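# Illustrative sketch of the packing above: texts are buffered until roughly
# seq_length * chars_per_token * num_of_sequences characters accumulate, the
# whole buffer is tokenized, the token lists are joined with the BOS id as a
# separator, and the concatenation is cut into fixed windows; only windows of
# exactly seq_length tokens are yielded.
#
#   texts:  "def foo(): ...", "print('hi')", ...
#   ids:    [t t t <bos> t t <bos> t t t t ...]
#   yields: torch.tensor of shape (seq_length,), one per full window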
def create_dataloader( args ):
    """simple docstring"""
    ds_kwargs = {'streaming': True}
    train_data = load_dataset(args.dataset_name , split='train' , **ds_kwargs )
    valid_dataset = ConstantLengthDataset(tokenizer , train_data , seq_length=args.seq_length )
    eval_dataloader = DataLoader(valid_dataset , batch_size=args.batch_size )
    return eval_dataloader
def evaluate( args ):
    """simple docstring"""
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader ):
        with torch.no_grad():
            outputs = model(batch , labels=batch )
        loss = outputs.loss.repeat(args.batch_size )
        losses.append(accelerator.gather(loss ) )
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses ) )
    try:
        perplexity = torch.exp(loss )
    except OverflowError:
        perplexity = float('inf' )
    return loss.item(), perplexity.item()
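# Worked example for the metric above: perplexity is exp(mean loss), so a mean
# eval loss of 2.0 maps to a perplexity of about 7.39 (e**2); the OverflowError
# fallback only triggers for extremely large losses.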
# Setup Accelerator
accelerator = Accelerator()
# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)
# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
eval_dataloader = create_dataloader(args)
# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('''Evaluating and saving model after training''')
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
| 322 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __snake_case ( ):
"""simple docstring"""
A_ = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
A_ = Dataset.from_dict(__UpperCamelCase )
return dataset
class _a ( TestCase ):
    """simple docstring"""
def __A ( self : Union[str, Any] ):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def __A ( self : List[Any] ):
        ds = get_dataset()
        ds_filter , duplicate_clusters = deduplicate_dataset(ds )
        self.assertEqual(len(ds_filter ) , 2 )
        print(duplicate_clusters )
self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 )
self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , UpperCAmelCase ) | 312 | 0 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance ( x , y , max_step ) -> float:
    a = x
    b = y
    for step in range(max_step ):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex numbers with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
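# Quick illustrative checks for the escape-time function above: the origin never
# diverges, so it saturates at 1.0, while a point outside the set escapes within
# a few iterations and maps near 0.
#
#   get_distance(0, 0, 50)  # -> 1.0 (inside the Mandelbrot set)
#   get_distance(1, 0, 50)  # -> 2 / 49, diverges after two steps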
def get_black_and_white_rgb ( distance ) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb ( distance ) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(distance , 1 , 1 ) )
def get_image ( image_width = 800 , image_height = 600 , figure_center_x = -0.6 , figure_center_y = 0 , figure_width = 3.2 , max_step = 50 , use_distance_color_coding = True , ) -> Image.Image:
    img = Image.new('RGB' , (image_width, image_height) )
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x , figure_y , max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 312 | """simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)
_FORMAT_TYPES = {}
_FORMAT_TYPES_ALIASES = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE = {}
def _register_formatter ( formatter_cls , format_type , aliases = None , ) -> None:
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f'Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})' )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type] ):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f'Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})' )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter ( unavailable_error , format_type , aliases = None ) -> None:
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type] ):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
    _torch_error = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''')
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
__UpperCamelCase = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''')
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
__UpperCamelCase = ValueError('''JAX needs to be installed to be able to return JAX arrays.''')
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the input format_type is an alias, return the canonical format type name."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Get a Formatter object from its name or an alias."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f'Return type should be None or selected in {list(fmt for fmt in _FORMAT_TYPES.keys() if fmt is not None)}, but got \'{format_type}\'' )
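# Illustrative usage (a sketch, not part of the module's public surface): aliases
# resolve to their registered format type before instantiation, so assuming numpy
# is installed the following two calls are equivalent, and `None` yields the
# default PythonFormatter:
#
#     formatter = get_formatter("np")      # same as get_formatter("numpy")
#     python_formatter = get_formatter(None)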
| 312 | 1 |
'''simple docstring'''
from jiwer import compute_measures
import datasets
UpperCamelCase_ = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
UpperCamelCase_ = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
UpperCamelCase_ = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            codebase_urls=['https://github.com/jitsi/jiwer/'],
            reference_urls=[
                'https://en.wikipedia.org/wiki/Word_error_rate',
            ],
        )
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
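# Worked example of the formula above (illustrative only): for the docstring's
# inputs, "this is the prediction" vs. "this is the reference" contributes one
# substitution over 4 reference words, and "there is an other sample" vs.
# "there is another one" contributes two substitutions plus one insertion over
# 4 reference words, so WER = (S + D + I) / N = (1 + 2 + 1) / 8 = 0.5, matching
# the documented output.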
| 251 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    # See all possible arguments in src/transformers/training_args.py or by
    # passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            task="image-classification",
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            task="image-classification",
        )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]
    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        """Computes accuracy on a batch of predictions."""
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="image-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )
    def train_transforms(example_batch):
        """Apply _train_transforms across a batch."""
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch
    def val_transforms(example_batch):
        """Apply _val_transforms across a batch."""
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch
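    # Note: `set_transform` (used below) applies these functions lazily, when
    # rows are accessed, so decoded PIL images are transformed on the fly rather
    # than being materialized in the Arrow cache.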
    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)
    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
| 251 | 1 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)
        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits
        assert torch.allclose(out1, out2), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name)
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
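# Note: the copy loop above relies on `from_model.state_dict()` and
# `our_model.state_dict()` enumerating parameters in the same order, i.e. the
# i-th timm tensor is assigned to the i-th HF key. This is a positional-mapping
# assumption, not a name-based one, which the `torch.allclose` check validates.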
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_hidden_sizes = {
"levit-128S": 1_28,
"levit-128": 1_28,
"levit-192": 1_92,
"levit-256": 2_56,
"levit-384": 3_84,
}
    names_to_config = {
"levit-128S": ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"levit-128": ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"levit-192": ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"levit-256": ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"levit-384": ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="levit-dump-folder/",
type=Path,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 364 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2 as cva  # OpenCV, referenced as "cva" throughout this file
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cva.imwrite(f"{file_root}.jpg", new_image, [cva.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
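# Worked example (illustrative): a YOLO-format label line "0 0.5 0.5 0.2 0.2"
# (class x_center y_center width height, all normalized) is converted above to
# the corner-format box [0, 0.4, 0.4, 0.6, 0.6], since xmin = 0.5 - 0.2 / 2 and
# xmax = 0.5 + 0.2 / 2.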
def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
) -> tuple[list, list, str]:
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cva.imread(path)
        if i == 0:  # top-left
            img = cva.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cva.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cva.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cva.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
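# Worked example (illustrative): with scale_x = 0.5, a normalized x-coordinate
# of 0.2 inside the top-right tile lands at 0.5 + 0.2 * (1 - 0.5) = 0.6 in the
# mosaic, i.e. each tile's box coordinates are compressed into its quadrant.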
def random_chars(number_char: int) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅") | 171 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"yjernite/retribert-base-uncased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" RetriBERT tokenizer (backed by HuggingFace's *tokenizers* library)."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
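    # Worked example (illustrative): for a sentence pair A = [5, 6] and B = [7],
    # the layout is [CLS] A [SEP] B [SEP], so the token type ids are
    # [0, 0, 0, 0] for "[CLS] 5 6 [SEP]" followed by [1, 1] for "7 [SEP]".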
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 10 |
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """simple docstring"""
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space) - 1]
    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
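# Worked example (illustrative), the classic healthy/sick HMM:
#   observations = ["normal", "cold", "dizzy"]
#   states = ["healthy", "sick"]
#   start_p = {"healthy": 0.6, "sick": 0.4}
#   trans_p = {"healthy": {"healthy": 0.7, "sick": 0.3},
#              "sick": {"healthy": 0.4, "sick": 0.6}}
#   emit_p = {"healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#             "sick": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}}
#   viterbi(observations, states, start_p, trans_p, emit_p)
#   -> ['healthy', 'healthy', 'sick']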
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """simple docstring"""
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(
        initial_probabilities, transition_probabilities, emission_probabilities)
def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """simple docstring"""
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ] ):
        raise ValueError("There's an empty parameter")
def _validate_lists(observations_space: Any, states_space: Any) -> None:
    """simple docstring"""
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")
def _validate_list(_object: Any, var_name: str) -> None:
    """simple docstring"""
    if not isinstance(_object, list):
        msg = f"""{var_name} must be a list"""
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"""{var_name} must be a list of strings"""
                raise ValueError(msg)
def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """simple docstring"""
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")
def _validate_nested_dict(_object: Any, var_name: str) -> None:
    """simple docstring"""
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)
def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    """simple docstring"""
    if not isinstance(_object, dict):
        msg = f"""{var_name} must be a dict"""
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"""{var_name} all keys must be strings"""
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"""{var_name} {nested_text}all values must be {value_type.__name__}"""
        raise ValueError(msg)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 10 | 1 |
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""")
    if len(series) == 0:
        raise ValueError("""Input list must be a non empty list""")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def arithmetic_mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""")
    if len(series) == 0:
        raise ValueError("""Input list must be a non empty list""")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
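# Illustrative checks (comments only; the doctest runner below collects only
# docstrings, so these lines do not execute):
#   is_arithmetic_series([2, 4, 6])      -> True   (common difference 2)
#   is_arithmetic_series([3, 6, 12, 24]) -> False  (differences 3, 6, 12)
#   arithmetic_mean([2, 4, 6])           -> 4.0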
if __name__ == "__main__":
import doctest
doctest.testmod()
| 351 |
"""simple docstring"""
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies a warmup schedule on a given learning rate decay schedule."""
    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name
    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )
    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
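# Worked example of the schedule above (illustrative): with
# initial_learning_rate=1e-3, warmup_steps=1000 and power=1.0, step 500 yields
# 1e-3 * (500 / 1000) ** 1.0 = 5e-4; from step 1000 onward the wrapped
# decay_schedule_fn takes over, evaluated at (step - warmup_steps).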
def create_optimizer(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None,
    adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    """Creates an optimizer with a learning rate schedule using a warmup phase followed by a decay."""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
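# Illustrative usage (a sketch; the argument values are placeholders):
#
#     optimizer, lr_schedule = create_optimizer(
#         init_lr=3e-5, num_train_steps=10_000, num_warmup_steps=1_000, weight_decay_rate=0.01
#     )
#
# which yields an AdamWeightDecay instance whose LR warms up for the first
# 1,000 steps and then decays to init_lr * min_lr_ratio (here 0) over the rest.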
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay applied to selected parameters."""
    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-7,
        amsgrad=False,
        weight_decay_rate=0.0,
        include_in_weight_decay=None,
        exclude_from_weight_decay=None,
        name="AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)
    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )
    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()
    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)
    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}
    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)
    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)
    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config
    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator(object):
    """Accumulates gradients across replicas until they are applied."""
    def __init__(self):
        self._gradients = []
        self._accum_steps = None
    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()
    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f'''Expected {len(self._gradients)} gradients, but got {len(gradients)}''')
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)
    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
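# Illustrative usage (a sketch; `k`, `grads`, `model`, and `optimizer` are
# placeholders for objects defined by the caller):
#
#     accumulator = GradientAccumulator()
#     for micro_batch in range(k):
#         grads = ...  # gradients for one micro-batch, e.g. from tf.GradientTape
#         accumulator(grads)
#     scaled = [g / tf.cast(k, g.dtype) for g in accumulator.gradients]
#     optimizer.apply_gradients(zip(scaled, model.trainable_variables))
#     accumulator.reset()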
| 56 | 0 |
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
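# NOTE: `sqlite_path` and `set_sqlalchemy_silence_uber_warning` (used below) are
# pytest fixtures expected to come from the test suite's conftest; they are not
# defined in this module.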
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            """dataset""" , """sqlite:///""" + sqlite_path , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / """cache"""
    default_expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , features=features , cache_dir=cache_dir ).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path) ) as con:
        cur = con.cursor()
        cur.execute("""SELECT * FROM dataset""" )
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / """cache"""
    output_sqlite_path = os.path.join(cache_dir, """tmp.sql""" )
    dataset = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=cache_dir ).read()
    SqlDatasetWriter(dataset, """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=1 ).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / """cache"""
    output_sqlite_path = os.path.join(cache_dir, """tmp.sql""" )
    dataset = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=cache_dir ).read()
    SqlDatasetWriter(dataset, """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=2 ).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / """cache"""
    output_sqlite_path = os.path.join(cache_dir, """tmp.sql""" )
    dataset = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=cache_dir ).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=0 ).write()
| 20 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True
    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)
    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)
class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss
    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
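# Note on the cross entropy above: `labels[..., None] == jnp.arange(n)[None]`
# builds a one-hot encoding by broadcasting, e.g. label 2 with n = 4 becomes
# [0., 0., 1., 0.], so the sum over the last axis picks out the log-probability
# of the target class.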
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500
    block_size: int = 128
    num_random_blocks: int = 3
    batch_size_per_device: int = 1
    max_epochs: int = 5
    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095
    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"
    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs
    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch
    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch
    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)
    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
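# Note: `_fetch_inputs` pads every example with `pad_id` up to `max_length`
# (4096), so all batches have a static shape; this matches the "no dynamic
# padding on TPUs" comment above, since XLA would otherwise recompile for each
# new sequence length.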
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")
        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs
        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )
    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")
    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name="batch")
def eval_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")
    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs
    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class __snake_case :
_a : Args
_a : Callable
_a : Callable
_a : Callable
_a : Callable
_a : wandb
_a : Callable= None
    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)
    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i
    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())
    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())
    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))
    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]
    print("DONE")
    return params, opt_state, step, args, data_collator
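# Hedged aside (not part of the original script): a minimal, self-contained
# sketch of the flax msgpack round-trip that save_checkpoint/restore_checkpoint
# rely on above; the `demo_params` pytree is an illustrative assumption.
#
#     from flax.serialization import from_bytes, to_bytes
#     import jax.numpy as jnp
#
#     demo_params = {"w": jnp.ones((2, 2))}
#     restored = from_bytes(demo_params, to_bytes(demo_params))  # same structure and values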
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        # keys of the flattened dict are tuples of parameter-path names
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
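# Hedged usage sketch (not from the original file): how the warmup + linear
# decay schedule assembled by scheduler_fn behaves. The step counts and rates
# below are illustrative assumptions, not values taken from the training setup.
if __name__ == "__main__":
    demo_lr = scheduler_fn(lr=3e-5, init_lr=0.0, warmup_steps=100, num_train_steps=1000)
    # ramps from 0 to 3e-5 over the first 100 steps, then decays toward 1e-7
    print(demo_lr(0), demo_lr(100), demo_lr(999))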
| 20 | 1 |
'''simple docstring'''
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
__UpperCAmelCase :Tuple = "Create a default config file for Accelerate with only a few flags set."
def _a ( _lowercase : List[Any]="no" , _lowercase : str = default_json_config_file , _lowercase : bool = False ):
'''simple docstring'''
__UpperCAmelCase : Dict = Path(_lowercase )
path.parent.mkdir(parents=_lowercase , exist_ok=_lowercase )
if path.exists():
print(
F'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.' )
return False
__UpperCAmelCase : List[str] = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}' )
__UpperCAmelCase : int = {
'''compute_environment''': '''LOCAL_MACHINE''',
'''mixed_precision''': mixed_precision,
}
if torch.cuda.is_available():
__UpperCAmelCase : Optional[Any] = torch.cuda.device_count()
__UpperCAmelCase : List[str] = num_gpus
__UpperCAmelCase : int = False
if num_gpus > 1:
__UpperCAmelCase : Any = '''MULTI_GPU'''
else:
__UpperCAmelCase : int = '''NO'''
elif is_xpu_available() and use_xpu:
__UpperCAmelCase : List[Any] = torch.xpu.device_count()
__UpperCAmelCase : List[Any] = num_xpus
__UpperCAmelCase : Optional[int] = False
if num_xpus > 1:
__UpperCAmelCase : Any = '''MULTI_XPU'''
else:
__UpperCAmelCase : Optional[Any] = '''NO'''
elif is_npu_available():
__UpperCAmelCase : Dict = torch.npu.device_count()
__UpperCAmelCase : Any = num_npus
__UpperCAmelCase : Any = False
if num_npus > 1:
__UpperCAmelCase : Dict = '''MULTI_NPU'''
else:
__UpperCAmelCase : Optional[int] = '''NO'''
else:
__UpperCAmelCase : List[str] = 0
__UpperCAmelCase : Dict = True
__UpperCAmelCase : Dict = 1
__UpperCAmelCase : Tuple = '''NO'''
__UpperCAmelCase : List[Any] = ClusterConfig(**_lowercase )
config.to_json_file(_lowercase )
return path
def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser
def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
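# Hedged usage sketch (not part of the original module): the same helper is
# also exposed as `accelerate.utils.write_basic_config`, and the CLI equivalent
# is `accelerate config default --mixed_precision fp16`. The save location below
# is an illustrative assumption.
if __name__ == "__main__":
    config_path = write_basic_config(mixed_precision="no", save_location="/tmp/accelerate_default_config.json")
    print(config_path)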
| 357 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DebertaConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type)

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
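# Hedged usage sketch (not part of the test suite): the checkpoint exercised
# below can also be queried directly for hidden states, e.g.
#
#     from transformers import AutoTokenizer, DebertaModel
#
#     tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-base")
#     model = DebertaModel.from_pretrained("microsoft/deberta-base")
#     inputs = tokenizer("Hello world", return_tensors="pt")
#     hidden = model(**inputs).last_hidden_state  # shape (1, seq_len, 768)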
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass
    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 240 | 0 |
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
| 312 |
def base16_encode(data: bytes) -> str:
    """
    Encodes the given bytes into base16.
    """
    # Turn the data into a list of integers (where each integer is a byte),
    # then turn each byte into its hexadecimal representation, make sure
    # it is uppercase, and then string everything together.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """
    Decodes the given base16-encoded string back into bytes.
    """
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
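# Hedged cross-check (not from the original file): Python's standard library
# implements the same RFC 3548 base16 codec, so the two functions above can be
# validated against it:
#
#     import base64
#     assert base64.b16encode(b"Hello") == b"48656C6C6F"
#     assert base64.b16decode("48656C6C6F") == b"Hello"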
if __name__ == "__main__":
import doctest
doctest.testmod() | 312 | 1 |
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio"""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
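# Illustrative behaviour check (values are assumptions, not from this script):
# a 20 s clip at 16 kHz (320000 samples) cut with max_length=5.0 yields a
# random window of 5 * 16000 = 80000 samples:
#
#     random_subsample(np.zeros(320000), max_length=5.0).shape == (80000,)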
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        """Computes accuracy on a batch of predictions"""
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
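# Hedged usage sketch (flag values below are illustrative, not from this file):
#
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --output_dir wav2vec2-base-ft-keyword-spotting \
#       --do_train --do_eval \
#       --max_length_seconds 1 \
#       --learning_rate 3e-5 --num_train_epochs 5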
| 362 |
'''simple docstring'''
from manim import *
class UpperCAmelCase(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        cpu_targs = []
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(cpu_targs[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(cpu_targs[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            cpu_targs.append(cpu_target)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4)
        checkpoint.move_to([3, 0.5, 0])

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())

        step_2 = MarkupText(
            f"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.",
            font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2), Write(blue_text))
        self.play(Write(checkpoint_text, run_time=1), Create(checkpoint_rect, run_time=1))

        first_animations = []
        second_animations = []
        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            first_animations.append(GrowFromCenter(target, run_time=1))

            cpu_target = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5])
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
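# Hedged usage note (not part of the original scene file): with manim-community
# installed, a scene like the one above is rendered from the CLI, e.g.
#
#     manim -pql this_file.py UpperCAmelCase
#
# where -p previews the output video and -ql selects low render quality for
# fast iteration.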
| 220 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)

        return inputs_dict
    class TFMobileBertModelTester(object):
        def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, embedding_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size
        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size)
            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
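# Hedged usage sketch (not part of the test suite): the integration check above
# can be reproduced interactively:
#
#     import tensorflow as tf
#     from transformers import TFMobileBertForPreTraining
#
#     model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
#     logits = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
#     print(logits.shape)  # (1, 6, 30522)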
| 33 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class BlipImageProcessor(BaseImageProcessor):
    r"""
    Constructs a BLIP image processor.
    """

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
def _a (self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = ChannelDimension.FIRST , **_lowerCamelCase , ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ : int = resample if resample is not None else self.resample
UpperCAmelCase__ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : int = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ : Any = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ : Tuple = image_std if image_std is not None else self.image_std
UpperCAmelCase__ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCAmelCase__ : Optional[int] = size if size is not None else self.size
UpperCAmelCase__ : List[str] = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase )
UpperCAmelCase__ : Any = make_list_of_images(_lowerCamelCase )
if not valid_images(_lowerCamelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCAmelCase__ : Dict = [convert_to_rgb(_lowerCamelCase ) for image in images]
# All transformations expect numpy arrays.
UpperCAmelCase__ : Optional[Any] = [to_numpy_array(_lowerCamelCase ) for image in images]
if do_resize:
UpperCAmelCase__ : Union[str, Any] = [self.resize(image=_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase ) for image in images]
if do_rescale:
UpperCAmelCase__ : List[str] = [self.rescale(image=_lowerCamelCase , scale=_lowerCamelCase ) for image in images]
if do_normalize:
UpperCAmelCase__ : Union[str, Any] = [self.normalize(image=_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase ) for image in images]
UpperCAmelCase__ : List[str] = [to_channel_dimension_format(_lowerCamelCase , _lowerCamelCase ) for image in images]
UpperCAmelCase__ : Optional[Any] = BatchFeature(data={"""pixel_values""": images} , tensor_type=_lowerCamelCase )
return encoded_outputs
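# A minimal usage sketch of the processor above (assumption: PIL is installed
# and the surrounding module imports resolve; the class keeps the obfuscated
# name used in this file):
#
#     from PIL import Image
#     processor = lowerCamelCase()  # 384x384, CLIP mean/std by default
#     batch = processor.preprocess(Image.open("cat.png"), return_tensors="np")
#     print(batch["pixel_values"].shape)  # (1, 3, 384, 384)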
| 171 | 0 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True, )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count=1):
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=f"{self.env.base_job_name}-single", instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, py_version="py36", )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    def test_glue(self):
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'''{estimator.latest_training_job.name}.json''' , "w" ) as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
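# Note: the pytest.mark.skipif guard above gates this suite behind the
# TEST_SAGEMAKER environment variable, so it is skipped locally unless you run
# something like the following (the test path is an assumption about the
# repository layout):
#
#     TEST_SAGEMAKER=True python -m pytest -s tests/sagemaker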
| 357 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
lowerCamelCase__ = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
print("Googling.....")
lowerCamelCase__ = f"""https://www.google.com/search?q={query}&num=100"""
lowerCamelCase__ = requests.get(
url,
headers={"User-Agent": str(UserAgent().random)},
)
try:
lowerCamelCase__ = (
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "yuRUbf"})
.find("a")
.get("href")
)
except AttributeError:
lowerCamelCase__ = parse_qs(
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "kCrYT"})
.find("a")
.get("href")
)["url"][0]
webbrowser.open(link)
| 310 | 0 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        '''simple docstring'''

        @staticmethod
        def open(*args, **kwargs):
            '''simple docstring'''
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    '''simple docstring'''

    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        """simple docstring"""
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        """simple docstring"""
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)
        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object, {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }, )
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)
        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object, {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    }, )
@require_tf
@unittest.skip('''Object detection not implemented in TF''' )
    def test_small_model_tf(self):
"""simple docstring"""
pass
@require_torch
    def test_small_model_pt(self):
        """simple docstring"""
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"
        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
], )
        outputs = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
], threshold=0.0, )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
],
[
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
],
], )
@require_torch
@slow
    def test_large_model_pt(self):
        """simple docstring"""
        model_id = "facebook/detr-resnet-50"
        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
], )
        outputs = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
], )
@require_torch
@slow
    def test_integration_torch_object_detection(self):
        """simple docstring"""
        model_id = "facebook/detr-resnet-50"
        object_detector = pipeline("object-detection", model=model_id)
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
], )
        outputs = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
], )
@require_torch
@slow
    def test_threshold(self):
        """simple docstring"""
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"
        object_detector = pipeline("object-detection", model=model_id)
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
], )
@require_torch
@require_pytesseract
@slow
    def test_layoutlm(self):
        """simple docstring"""
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993
        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)
        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png")
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'''score''': 0.9993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}},
{'''score''': 0.9993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}},
], )
| 201 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)
MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__( self, vocab_size=58101, decoder_vocab_size=None, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=58100, scale_embedding=False, pad_token_id=58100, eos_token_id=0, forced_eos_token_id=0, share_encoder_decoder_embeddings=True, **kwargs, ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ] )
        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1 )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1 )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_encoder_and_decoder(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
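# A minimal sketch of how the ONNX config above is typically consumed
# (assumption: a Marian tokenizer is available; "Helsinki-NLP/opus-mt-en-de"
# is used purely as an illustrative checkpoint id):
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#     config = MarianConfig()
#     onnx_config = MarianOnnxConfig(config, task="default")
#     dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
#     print(sorted(dummy.keys()))  # attention_mask, decoder_*, input_ids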
| 56 | 0 |
def base16_encode(data: bytes) -> str:
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])
def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
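# Round-trip example for the two helpers above:
#
#     >>> base16_encode(b"Hello World!")
#     '48656C6C6F20576F726C6421'
#     >>> base16_decode("48656C6C6F20576F726C6421")
#     b'Hello World!'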
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 171 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 171 | 1 |
"""simple docstring"""
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    '''simple docstring'''
    return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)
    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted
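# A short example of the class above (key and outputs follow the classic
# doctest values for this cipher; treat them as illustrative):
#
#     cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
#     token = cipher.encrypt("testing hill cipher")  # 'WHXYJOLM9C6XT085LL'
#     cipher.decrypt(token)                          # 'TESTINGHILLCIPHERR'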
def main() -> None:
    '''simple docstring'''
    n = int(input('Enter the order of the encryption key: '))
    hill_matrix = []
    print('Enter each row of the encryption key with space separated integers')
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)
    hc = HillCipher(numpy.array(hill_matrix))
    print('Would you like to encrypt or decrypt some text? (1 or 2)')
    option = input('\n1. Encrypt\n2. Decrypt\n')
    if option == "1":
        text_e = input('What text would you like to encrypt?: ')
        print('Your encrypted text is:')
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input('What text would you like to decrypt?: ')
        print('Your decrypted text is:')
        print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 172 |
import math
def prime_sieve(n: int) -> list:
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
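# Quick sanity check for the sieve above (runnable in a REPL):
#
#     >>> prime_sieve(20)
#     [2, 3, 5, 7, 11, 13, 17, 19]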
def solution(limit: int = 999_966_663_333) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
| 240 | 0 |
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    """simple docstring"""

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, embedding_size=16, hidden_size=36, num_hidden_layers=6, num_hidden_groups=6, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, sentence_order_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device )
        return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 2 | """simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)
CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class CodeGenConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "codegen"
    attribute_map = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
    def __init__( self, vocab_size=50400, n_positions=2048, n_ctx=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, tie_word_embeddings=False, **kwargs, ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    """simple docstring"""

    def __init__( self, config, task="default", patching_specs=None, use_past=False, ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs
    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1 )
        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
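# A minimal sketch of driving the ONNX config above (assumption: a tokenizer
# for a CodeGen checkpoint is available; the checkpoint id is illustrative):
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#     config = CodeGenConfig()
#     onnx_config = CodeGenOnnxConfig(config)
#     dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
#     print(list(dummy.keys()))  # ['input_ids', 'attention_mask']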
| 2 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}
SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__( self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs, ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 268 |
"""simple docstring"""
def solution( n : int = 1_00 ):
    '''simple docstring'''
    sum_of_squares = 0
    sum_of_ints = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
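# Illustrative check (value taken from the Project Euler 6 statement): for
# n = 10 the sum of squares is 385 and the square of the sum is 3025, so
# solution(10) should return 3025 - 385 = 2640.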
if __name__ == "__main__":
print(F'''{solution() = }''')
| 220 | 0 |
"""simple docstring"""
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def lowerCAmelCase_( lowercase_ : Optional[Any] ) -> Tuple:
_lowerCamelCase = tmp_path / '''file.csv'''
_lowerCamelCase = textwrap.dedent(
'''\
header1,header2
1,2
10,20
''' )
with open(lowercase_ , '''w''' ) as f:
f.write(lowercase_ )
return str(lowercase_ )
@pytest.fixture
def lowerCAmelCase_( lowercase_ : List[str] ) -> Optional[int]:
_lowerCamelCase = tmp_path / '''malformed_file.csv'''
_lowerCamelCase = textwrap.dedent(
'''\
header1,header2
1,2
10,20,
''' )
with open(lowercase_ , '''w''' ) as f:
f.write(lowercase_ )
return str(lowercase_ )
@pytest.fixture
def lowerCAmelCase_( lowercase_ : Tuple , lowercase_ : List[Any] ) -> List[Any]:
_lowerCamelCase = tmp_path / '''csv_with_image.csv'''
_lowerCamelCase = textwrap.dedent(
F"""\
image
{image_file}
""" )
with open(lowercase_ , '''w''' ) as f:
f.write(lowercase_ )
return str(lowercase_ )
@pytest.fixture
def lowerCAmelCase_( lowercase_ : Any ) -> Tuple:
_lowerCamelCase = tmp_path / '''csv_with_label.csv'''
_lowerCamelCase = textwrap.dedent(
'''\
label
good
bad
good
''' )
with open(lowercase_ , '''w''' ) as f:
f.write(lowercase_ )
return str(lowercase_ )
@pytest.fixture
def lowerCAmelCase_( lowercase_ : int ) -> Tuple:
_lowerCamelCase = tmp_path / '''csv_with_int_list.csv'''
_lowerCamelCase = textwrap.dedent(
'''\
int_list
1 2 3
4 5 6
7 8 9
''' )
with open(lowercase_ , '''w''' ) as f:
f.write(lowercase_ )
return str(lowercase_ )
def lowerCAmelCase_( lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Union[str, Any] ) -> Union[str, Any]:
_lowerCamelCase = Csv()
_lowerCamelCase = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(lowercase_ , match='''Error tokenizing data''' ):
for _ in generator:
pass
assert any(
record.levelname == '''ERROR'''
and '''Failed to read file''' in record.message
and os.path.basename(lowercase_ ) in record.message
for record in caplog.records )
@require_pil
def lowerCAmelCase_( lowercase_ : List[Any] ) -> Any:
with open(lowercase_ , encoding='''utf-8''' ) as f:
_lowerCamelCase = f.read().splitlines()[1]
_lowerCamelCase = Csv(encoding='''utf-8''' , features=Features({'''image''': Image()} ) )
_lowerCamelCase = csv._generate_tables([[csv_file_with_image]] )
_lowerCamelCase = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''image''' ).type == Image()()
_lowerCamelCase = pa_table.to_pydict()['''image''']
assert generated_content == [{"path": image_file, "bytes": None}]
def lowerCAmelCase_( lowercase_ : Optional[int] ) -> Dict:
with open(lowercase_ , encoding='''utf-8''' ) as f:
_lowerCamelCase = f.read().splitlines()[1:]
_lowerCamelCase = Csv(encoding='''utf-8''' , features=Features({'''label''': ClassLabel(names=['''good''', '''bad'''] )} ) )
_lowerCamelCase = csv._generate_tables([[csv_file_with_label]] )
_lowerCamelCase = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''label''' ).type == ClassLabel(names=['''good''', '''bad'''] )()
_lowerCamelCase = pa_table.to_pydict()['''label''']
    assert generated_content == [ClassLabel(names=['''good''', '''bad'''] ).str2int(label ) for label in labels]
def lowerCAmelCase_( lowercase_ : Optional[Any] ) -> Optional[Any]:
    _lowerCamelCase = Csv(encoding='''utf-8''' , sep=''',''' , converters={'''int_list''': lambda x : [int(i ) for i in x.split()]} )
_lowerCamelCase = csv._generate_tables([[csv_file_with_int_list]] )
_lowerCamelCase = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('''int_list''' ).type )
_lowerCamelCase = pa_table.to_pydict()['''int_list''']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
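# A minimal, self-contained sketch of the `converters` callable exercised by the
# last test above; the helper name `parse_int_list` is an illustrative
# assumption, not part of the datasets library:
def parse_int_list(x):
    # the converter receives each cell of the column as a string
    return [int(i) for i in x.split()]


assert parse_int_list('1 2 3') == [1, 2, 3]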
| 368 |
"""simple docstring"""
import copy
import os
import cv2 as cva
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
'''simple docstring'''
    def __init__( self ):
        self.img = ''''''
        self.original_image = ''''''
        self.last_list = []
        self.rem = 0
        self.L = 2_5_6
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0
    def stretch( self , input_image ):
        self.img = cva.imread(input_image , 0 )
        self.original_image = copy.deepcopy(self.img )
        x , _ , _ = plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] , label='''x''' )
        self.k = np.sum(x )
        for i in range(len(x ) ):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last )
            last = int(last + 1 if self.rem >= 0.5 else last )
            self.last_list.append(last )
        self.number_of_rows = int(np.ma.count(self.img ) / self.img[1].size )
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols ):
            for j in range(self.number_of_rows ):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cva.imwrite('''output_data/output.jpg''' , self.img )
    def plot_histogram( self ):
plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] )
    def show_image( self ):
cva.imshow('''Output-Image''' , self.img )
cva.imshow('''Input-Image''' , self.original_image )
cva.waitKey(5_0_0_0 )
cva.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.dirname(__file__), '''image_data/input.jpg''')
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
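# Note on the algorithm: `stretch` implements histogram equalization. Each grey
# level v is remapped to roughly (L - 1) * CDF(v), where CDF is the cumulative
# normalized histogram of the input image. A hypothetical numeric sketch: with
# L = 256 and CDF(100) = 0.5, level 100 maps to int(255 * 0.5) = 127.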
| 73 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __lowerCamelCase (_a ):
_lowercase = """yolos"""
def __init__( self: Tuple,A_: Any=768,A_: Any=12,A_: str=12,A_: Optional[int]=3072,A_: Dict="gelu",A_: Optional[Any]=0.0,A_: int=0.0,A_: int=0.0_2,A_: str=1E-12,A_: List[Any]=[512, 864],A_: Optional[int]=16,A_: Tuple=3,A_: Optional[int]=True,A_: List[str]=100,A_: Dict=True,A_: Optional[int]=False,A_: Dict=1,A_: Any=5,A_: Optional[Any]=2,A_: Optional[Any]=5,A_: str=2,A_: Union[str, Any]=0.1,**A_: Tuple,):
'''simple docstring'''
super().__init__(**A_ )
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = image_size
__UpperCamelCase = patch_size
__UpperCamelCase = num_channels
__UpperCamelCase = qkv_bias
__UpperCamelCase = num_detection_tokens
__UpperCamelCase = use_mid_position_embeddings
__UpperCamelCase = auxiliary_loss
# Hungarian matcher
__UpperCamelCase = class_cost
__UpperCamelCase = bbox_cost
__UpperCamelCase = giou_cost
# Loss coefficients
__UpperCamelCase = bbox_loss_coefficient
__UpperCamelCase = giou_loss_coefficient
__UpperCamelCase = eos_coefficient
class __lowerCamelCase (_a ):
_lowercase = version.parse("""1.11""" )
@property
def snake_case_ ( self: Any ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
return 1E-4
@property
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
return 12
| 310 |
import os
def solution( ) -> int:
    """simple docstring"""
    with open(os.path.dirname(__file__ ) + '/p022_names.txt' ) as file:
        names = str(file.readlines()[0] )
    names = names.replace('"' , '' ).split(',' )
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
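# Worked example from the Project Euler 22 statement: 'COLIN' has an
# alphabetical value of 3 + 15 + 12 + 9 + 14 = 53, and at position 938 in the
# sorted list it contributes 938 * 53 = 49714 to the total score.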
if __name__ == "__main__":
print(solution())
| 310 | 1 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , bert_config_file , pytorch_dump_path ):
    """simple docstring"""
    config = BertConfig.from_json_file(bert_config_file )
    print(f"""Building PyTorch model from configuration: {config}""" )
    model = BertForPreTraining(config )

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path )

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCamelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
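# Hedged usage sketch; the script name and all file paths below are
# placeholders, not real artifacts:
# python convert_bert_checkpoint.py \
#     --tf_checkpoint_path ./bert_model.ckpt \
#     --bert_config_file ./bert_config.json \
#     --pytorch_dump_path ./pytorch_model.bin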
| 87 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def _lowerCamelCase ( self ):
UpperCamelCase__ = """ZinengTang/tvlt-base"""
UpperCamelCase__ = tempfile.mkdtemp()
def _lowerCamelCase ( self , **__lowerCAmelCase ):
return TvltImageProcessor.from_pretrained(self.checkpoint , **__lowerCAmelCase )
def _lowerCamelCase ( self , **__lowerCAmelCase ):
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **__lowerCAmelCase )
def _lowerCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_feature_extractor()
UpperCamelCase__ = TvltProcessor(image_processor=__lowerCAmelCase , feature_extractor=__lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , __lowerCAmelCase )
self.assertIsInstance(processor.image_processor , __lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_feature_extractor()
UpperCamelCase__ = TvltProcessor(image_processor=__lowerCAmelCase , feature_extractor=__lowerCAmelCase )
UpperCamelCase__ = np.ones([12000] )
UpperCamelCase__ = feature_extractor(__lowerCAmelCase , return_tensors="""np""" )
UpperCamelCase__ = processor(audio=__lowerCAmelCase , return_tensors="""np""" )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_feature_extractor()
UpperCamelCase__ = TvltProcessor(image_processor=__lowerCAmelCase , feature_extractor=__lowerCAmelCase )
UpperCamelCase__ = np.ones([3, 224, 224] )
UpperCamelCase__ = image_processor(__lowerCAmelCase , return_tensors="""np""" )
UpperCamelCase__ = processor(images=__lowerCAmelCase , return_tensors="""np""" )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_feature_extractor()
UpperCamelCase__ = TvltProcessor(image_processor=__lowerCAmelCase , feature_extractor=__lowerCAmelCase )
UpperCamelCase__ = np.ones([12000] )
UpperCamelCase__ = np.ones([3, 224, 224] )
UpperCamelCase__ = processor(audio=__lowerCAmelCase , images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["""audio_values""", """audio_mask""", """pixel_values""", """pixel_mask"""] )
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase ):
processor()
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_feature_extractor()
UpperCamelCase__ = TvltProcessor(image_processor=__lowerCAmelCase , feature_extractor=__lowerCAmelCase )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="""`processor` and `image_processor`+`feature_extractor` model input names do not match""" , )
| 87 | 1 |
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
_A = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def a__ ( lowerCAmelCase ) -> Optional[int]:
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> Optional[int]:
if args.student_type == "roberta":
UpperCAmelCase__ : Optional[Any] = False
elif args.student_type == "gpt2":
UpperCAmelCase__ : Optional[int] = False
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> Any:
if args.student_type == "roberta":
UpperCAmelCase__ : Tuple = False
def a__ ( ) -> int:
UpperCAmelCase__ : Dict = argparse.ArgumentParser(description="""Training""" )
parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""" )
parser.add_argument(
"""--dump_path""" , type=lowerCAmelCase , required=lowerCAmelCase , help="""The output directory (log, checkpoints, parameters, etc.)""" )
parser.add_argument(
"""--data_file""" , type=lowerCAmelCase , required=lowerCAmelCase , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , )
parser.add_argument(
"""--student_type""" , type=lowerCAmelCase , choices=["""distilbert""", """roberta""", """gpt2"""] , required=lowerCAmelCase , help="""The student type (DistilBERT, RoBERTa).""" , )
parser.add_argument("""--student_config""" , type=lowerCAmelCase , required=lowerCAmelCase , help="""Path to the student configuration.""" )
parser.add_argument(
"""--student_pretrained_weights""" , default=lowerCAmelCase , type=lowerCAmelCase , help="""Load student initialization checkpoint.""" )
parser.add_argument(
"""--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=lowerCAmelCase , help="""Teacher type (BERT, RoBERTa).""" )
parser.add_argument("""--teacher_name""" , type=lowerCAmelCase , required=lowerCAmelCase , help="""The teacher model.""" )
parser.add_argument("""--temperature""" , default=2.0 , type=lowerCAmelCase , help="""Temperature for the softmax temperature.""" )
parser.add_argument(
"""--alpha_ce""" , default=0.5 , type=lowerCAmelCase , help="""Linear weight for the distillation loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_mlm""" , default=0.0 , type=lowerCAmelCase , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , )
parser.add_argument("""--alpha_clm""" , default=0.5 , type=lowerCAmelCase , help="""Linear weight for the CLM loss. Must be >=0.""" )
parser.add_argument("""--alpha_mse""" , default=0.0 , type=lowerCAmelCase , help="""Linear weight of the MSE loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_cos""" , default=0.0 , type=lowerCAmelCase , help="""Linear weight of the cosine embedding loss. Must be >=0.""" )
parser.add_argument(
"""--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" )
parser.add_argument(
"""--mlm_mask_prop""" , default=0.15 , type=lowerCAmelCase , help="""Proportion of tokens for which we need to make a prediction.""" , )
parser.add_argument("""--word_mask""" , default=0.8 , type=lowerCAmelCase , help="""Proportion of tokens to mask out.""" )
parser.add_argument("""--word_keep""" , default=0.1 , type=lowerCAmelCase , help="""Proportion of tokens to keep.""" )
parser.add_argument("""--word_rand""" , default=0.1 , type=lowerCAmelCase , help="""Proportion of tokens to randomly replace.""" )
parser.add_argument(
"""--mlm_smoothing""" , default=0.7 , type=lowerCAmelCase , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , )
parser.add_argument("""--token_counts""" , type=lowerCAmelCase , help="""The token counts in the data_file for MLM.""" )
parser.add_argument(
"""--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , )
parser.add_argument(
"""--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , )
parser.add_argument(
"""--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , )
parser.add_argument("""--n_epoch""" , type=lowerCAmelCase , default=3 , help="""Number of pass on the whole dataset.""" )
parser.add_argument("""--batch_size""" , type=lowerCAmelCase , default=5 , help="""Batch size (for each process).""" )
parser.add_argument(
"""--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. Default is true.""" , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=lowerCAmelCase , default=50 , help="""Gradient accumulation for larger training batches.""" , )
parser.add_argument("""--warmup_prop""" , default=0.05 , type=lowerCAmelCase , help="""Linear warmup proportion.""" )
parser.add_argument("""--weight_decay""" , default=0.0 , type=lowerCAmelCase , help="""Weight decay if we apply some.""" )
parser.add_argument("""--learning_rate""" , default=5E-4 , type=lowerCAmelCase , help="""The initial learning rate for Adam.""" )
parser.add_argument("""--adam_epsilon""" , default=1E-6 , type=lowerCAmelCase , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""" , default=5.0 , type=lowerCAmelCase , help="""Max gradient norm.""" )
parser.add_argument("""--initializer_range""" , default=0.02 , type=lowerCAmelCase , help="""Random initialization range.""" )
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=lowerCAmelCase , default="""O1""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_gpu""" , type=lowerCAmelCase , default=1 , help="""Number of GPUs in the node.""" )
parser.add_argument("""--local_rank""" , type=lowerCAmelCase , default=-1 , help="""Distributed training - Local rank""" )
parser.add_argument("""--seed""" , type=lowerCAmelCase , default=56 , help="""Random seed""" )
parser.add_argument("""--log_interval""" , type=lowerCAmelCase , default=5_00 , help="""Tensorboard logging interval.""" )
parser.add_argument("""--checkpoint_interval""" , type=lowerCAmelCase , default=40_00 , help="""Checkpoint interval.""" )
UpperCAmelCase__ : List[Any] = parser.parse_args()
sanity_checks(lowerCAmelCase )
# ARGS #
init_gpu_params(lowerCAmelCase )
set_seed(lowerCAmelCase )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
""" itUse `--force` if you want to overwrite it""" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(F"""Param: {args}""" )
with open(os.path.join(args.dump_path , """parameters.json""" ) , """w""" ) as f:
json.dump(vars(lowerCAmelCase ) , lowerCAmelCase , indent=4 )
git_log(args.dump_path )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = MODEL_CLASSES[args.student_type]
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
UpperCAmelCase__ : List[Any] = teacher_tokenizer_class.from_pretrained(args.teacher_name )
UpperCAmelCase__ : List[Any] = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
UpperCAmelCase__ : List[Any] = tokenizer.all_special_tokens.index(lowerCAmelCase )
UpperCAmelCase__ : Tuple = tokenizer.all_special_ids[idx]
logger.info(F"""Special tokens {special_tok_ids}""" )
UpperCAmelCase__ : Any = special_tok_ids
UpperCAmelCase__ : Any = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F"""Loading data from {args.data_file}""" )
with open(args.data_file , """rb""" ) as fp:
UpperCAmelCase__ : List[str] = pickle.load(lowerCAmelCase )
if args.mlm:
logger.info(F"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , """rb""" ) as fp:
UpperCAmelCase__ : List[Any] = pickle.load(lowerCAmelCase )
UpperCAmelCase__ : List[Any] = np.maximum(lowerCAmelCase , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
UpperCAmelCase__ : int = 0.0 # do not predict special tokens
UpperCAmelCase__ : str = torch.from_numpy(lowerCAmelCase )
else:
UpperCAmelCase__ : List[Any] = None
UpperCAmelCase__ : str = LmSeqsDataset(params=lowerCAmelCase , data=lowerCAmelCase )
logger.info("""Data loader created.""" )
# STUDENT #
logger.info(F"""Loading student config from {args.student_config}""" )
UpperCAmelCase__ : List[str] = student_config_class.from_pretrained(args.student_config )
UpperCAmelCase__ : List[Any] = True
if args.student_pretrained_weights is not None:
logger.info(F"""Loading pretrained weights from {args.student_pretrained_weights}""" )
UpperCAmelCase__ : List[str] = student_model_class.from_pretrained(args.student_pretrained_weights , config=lowerCAmelCase )
else:
UpperCAmelCase__ : List[Any] = student_model_class(lowerCAmelCase )
if args.n_gpu > 0:
student.to(F"""cuda:{args.local_rank}""" )
logger.info("""Student loaded.""" )
# TEACHER #
UpperCAmelCase__ : str = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=lowerCAmelCase )
if args.n_gpu > 0:
teacher.to(F"""cuda:{args.local_rank}""" )
logger.info(F"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(lowerCAmelCase , lowerCAmelCase )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(lowerCAmelCase , lowerCAmelCase )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
UpperCAmelCase__ : Optional[int] = Distiller(
params=lowerCAmelCase , dataset=lowerCAmelCase , token_probs=lowerCAmelCase , student=lowerCAmelCase , teacher=lowerCAmelCase )
distiller.train()
logger.info("""Let's go get some drinks.""" )
if __name__ == "__main__":
main()
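# Hedged invocation sketch for the parser above; the script name and every file
# path are placeholders, and the flag values are chosen only to satisfy
# sanity_checks (e.g. --mlm requires alpha_mlm > 0 and alpha_clm == 0):
# python train.py --force --dump_path ./dump --data_file data.pkl \
#     --student_type distilbert --student_config distilbert.json \
#     --teacher_type bert --teacher_name bert-base-uncased \
#     --mlm --alpha_mlm 0.5 --alpha_clm 0.0 --token_counts counts.pkl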
| 171 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_A = {
"""configuration_owlvit""": [
"""OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""OwlViTConfig""",
"""OwlViTOnnxConfig""",
"""OwlViTTextConfig""",
"""OwlViTVisionConfig""",
],
"""processing_owlvit""": ["""OwlViTProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["""OwlViTFeatureExtractor"""]
_A = ["""OwlViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OwlViTModel""",
"""OwlViTPreTrainedModel""",
"""OwlViTTextModel""",
"""OwlViTVisionModel""",
"""OwlViTForObjectDetection""",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 171 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class lowercase_ ( lowercase ):
'''simple docstring'''
__snake_case = '''rwkv'''
__snake_case = {'''max_position_embeddings''': '''context_length'''}
def __init__( self : Optional[Any] , __UpperCAmelCase : Optional[int]=50_277 , __UpperCAmelCase : str=1_024 , __UpperCAmelCase : Tuple=4_096 , __UpperCAmelCase : str=32 , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : Dict=1e-5 , __UpperCAmelCase : List[Any]=0 , __UpperCAmelCase : Dict=0 , __UpperCAmelCase : Dict=6 , __UpperCAmelCase : int=False , __UpperCAmelCase : Union[str, Any]=True , **__UpperCAmelCase : List[str] , ) ->Optional[int]:
"""simple docstring"""
a = vocab_size
a = context_length
a = hidden_size
a = num_hidden_layers
a = attention_hidden_size if attention_hidden_size is not None else hidden_size
a = intermediate_size if intermediate_size is not None else 4 * hidden_size
a = layer_norm_epsilon
a = rescale_every
a = use_cache
a = bos_token_id
a = eos_token_id
super().__init__(
tie_word_embeddings=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
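        # Note on the two conditional defaults above: when no override is given,
        # attention_hidden_size falls back to hidden_size and intermediate_size
        # to 4 * hidden_size, e.g. hidden_size=1024 yields 1024 and 4096.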
| 26 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def _a ( a :Tuple ) -> int:
a = tmp_path / '''file.csv'''
a = textwrap.dedent(
'''\
header1,header2
1,2
10,20
''' )
with open(a , '''w''' ) as f:
f.write(a )
return str(a )
@pytest.fixture
def _a ( a :int ) -> List[str]:
a = tmp_path / '''malformed_file.csv'''
a = textwrap.dedent(
'''\
header1,header2
1,2
10,20,
''' )
with open(a , '''w''' ) as f:
f.write(a )
return str(a )
@pytest.fixture
def _a ( a :Dict , a :int ) -> List[str]:
a = tmp_path / '''csv_with_image.csv'''
a = textwrap.dedent(
F"""\
image
{image_file}
""" )
with open(a , '''w''' ) as f:
f.write(a )
return str(a )
@pytest.fixture
def _a ( a :List[Any] ) -> Dict:
a = tmp_path / '''csv_with_label.csv'''
a = textwrap.dedent(
'''\
label
good
bad
good
''' )
with open(a , '''w''' ) as f:
f.write(a )
return str(a )
@pytest.fixture
def _a ( a :Tuple ) -> Any:
a = tmp_path / '''csv_with_int_list.csv'''
a = textwrap.dedent(
'''\
int_list
1 2 3
4 5 6
7 8 9
''' )
with open(a , '''w''' ) as f:
f.write(a )
return str(a )
def _a ( a :Dict , a :int , a :Union[str, Any] ) -> List[Any]:
a = Csv()
a = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(a , match='''Error tokenizing data''' ):
for _ in generator:
pass
assert any(
record.levelname == '''ERROR'''
and '''Failed to read file''' in record.message
and os.path.basename(a ) in record.message
for record in caplog.records )
@require_pil
def _a ( a :Dict ) -> Any:
with open(a , encoding='''utf-8''' ) as f:
a = f.read().splitlines()[1]
a = Csv(encoding='''utf-8''' , features=Features({'''image''': Image()} ) )
a = csv._generate_tables([[csv_file_with_image]] )
a = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''image''' ).type == Image()()
a = pa_table.to_pydict()['''image''']
assert generated_content == [{"path": image_file, "bytes": None}]
def _a ( a :Any ) -> Tuple:
with open(a , encoding='''utf-8''' ) as f:
a = f.read().splitlines()[1:]
a = Csv(encoding='''utf-8''' , features=Features({'''label''': ClassLabel(names=['''good''', '''bad'''] )} ) )
a = csv._generate_tables([[csv_file_with_label]] )
a = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''label''' ).type == ClassLabel(names=['''good''', '''bad'''] )()
a = pa_table.to_pydict()['''label''']
    assert generated_content == [ClassLabel(names=['''good''', '''bad'''] ).str2int(label ) for label in labels]
def _a ( a :Union[str, Any] ) -> Optional[Any]:
    a = Csv(encoding='''utf-8''' , sep=''',''' , converters={'''int_list''': lambda x : [int(i ) for i in x.split()]} )
a = csv._generate_tables([[csv_file_with_int_list]] )
a = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('''int_list''' ).type )
a = pa_table.to_pydict()['''int_list''']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 26 | 1 |
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCAmelCase :
'''simple docstring'''
def __init__(self : str , UpperCamelCase : Optional[int] , UpperCamelCase : List[str]=13 , UpperCamelCase : List[str]=7 , UpperCamelCase : Dict=True , UpperCamelCase : Optional[Any]=True , UpperCamelCase : List[str]=True , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : List[str]=99 , UpperCamelCase : Optional[int]=16 , UpperCamelCase : Optional[int]=36 , UpperCamelCase : Union[str, Any]=6 , UpperCamelCase : Dict=6 , UpperCamelCase : Dict=6 , UpperCamelCase : Any=37 , UpperCamelCase : Tuple="gelu" , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Any=512 , UpperCamelCase : Dict=16 , UpperCamelCase : Union[str, Any]=2 , UpperCamelCase : List[str]=0.02 , UpperCamelCase : Any=3 , UpperCamelCase : Dict=4 , UpperCamelCase : Tuple=None , ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = embedding_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_hidden_groups
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
def UpperCamelCase__ (self : Any ):
'''simple docstring'''
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def UpperCamelCase__ (self : List[str] , UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : List[str] , UpperCamelCase : List[str] , UpperCamelCase : Optional[Any] , UpperCamelCase : str ):
'''simple docstring'''
lowercase__ = AlbertModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowercase__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase )
lowercase__ = model(UpperCamelCase , token_type_ids=UpperCamelCase )
lowercase__ = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase__ (self : List[Any] , UpperCamelCase : Any , UpperCamelCase : Any , UpperCamelCase : int , UpperCamelCase : Dict , UpperCamelCase : int , UpperCamelCase : Optional[int] , UpperCamelCase : List[str] ):
'''simple docstring'''
lowercase__ = AlbertForPreTraining(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowercase__ = model(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase , sentence_order_label=UpperCamelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def UpperCamelCase__ (self : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : Dict , UpperCamelCase : Tuple ):
'''simple docstring'''
lowercase__ = AlbertForMaskedLM(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowercase__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ (self : Tuple , UpperCamelCase : Any , UpperCamelCase : List[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : int , UpperCamelCase : List[str] , UpperCamelCase : Tuple ):
'''simple docstring'''
lowercase__ = AlbertForQuestionAnswering(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowercase__ = model(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , start_positions=UpperCamelCase , end_positions=UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ (self : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : List[Any] , UpperCamelCase : Dict ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = AlbertForSequenceClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowercase__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ (self : str , UpperCamelCase : List[Any] , UpperCamelCase : str , UpperCamelCase : int , UpperCamelCase : List[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = AlbertForTokenClassification(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowercase__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ (self : Optional[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.num_choices
lowercase__ = AlbertForMultipleChoice(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowercase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ = model(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase (lowercase_ , lowercase_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ : Dict = (
{
"""feature-extraction""": AlbertModel,
"""fill-mask""": AlbertForMaskedLM,
"""question-answering""": AlbertForQuestionAnswering,
"""text-classification""": AlbertForSequenceClassification,
"""token-classification""": AlbertForTokenClassification,
"""zero-shot""": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ : int = True
def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : Any , UpperCamelCase : Any=False ):
'''simple docstring'''
lowercase__ = super()._prepare_for_class(UpperCamelCase , UpperCamelCase , return_labels=UpperCamelCase )
if return_labels:
if model_class in get_values(UpperCamelCase ):
lowercase__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCamelCase )
lowercase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase )
return inputs_dict
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = AlbertModelTester(self )
lowercase__ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 )
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase__ (self : Tuple ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase )
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase )
def UpperCamelCase__ (self : Dict ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase )
def UpperCamelCase__ (self : Dict ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase )
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase )
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase__ = type
self.model_tester.create_and_check_model(*UpperCamelCase )
@slow
def UpperCamelCase__ (self : Tuple ):
'''simple docstring'''
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AlbertModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
@require_torch
class __lowerCAmelCase (unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
lowercase__ = AlbertModel.from_pretrained('''albert-base-v2''' )
lowercase__ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowercase__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase__ = model(UpperCamelCase , attention_mask=UpperCamelCase )[0]
lowercase__ = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , UpperCamelCase )
lowercase__ = torch.tensor(
[[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCamelCase , atol=1E-4 ) )
| 2 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : int = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = """mvp"""
lowerCAmelCase__ : Optional[Any] = ["""past_key_values"""]
lowerCAmelCase__ : List[str] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__(self : Any , UpperCamelCase : Optional[int]=50267 , UpperCamelCase : Tuple=1024 , UpperCamelCase : int=12 , UpperCamelCase : Tuple=4096 , UpperCamelCase : Dict=16 , UpperCamelCase : int=12 , UpperCamelCase : Optional[int]=4096 , UpperCamelCase : Optional[int]=16 , UpperCamelCase : Tuple=0.0 , UpperCamelCase : Tuple=0.0 , UpperCamelCase : List[Any]="gelu" , UpperCamelCase : Union[str, Any]=1024 , UpperCamelCase : Optional[Any]=0.1 , UpperCamelCase : str=0.0 , UpperCamelCase : str=0.0 , UpperCamelCase : Optional[Any]=0.02 , UpperCamelCase : List[str]=0.0 , UpperCamelCase : List[str]=False , UpperCamelCase : Optional[int]=True , UpperCamelCase : Any=1 , UpperCamelCase : int=0 , UpperCamelCase : int=2 , UpperCamelCase : Any=True , UpperCamelCase : Optional[Any]=2 , UpperCamelCase : Optional[Any]=2 , UpperCamelCase : Tuple=False , UpperCamelCase : int=100 , UpperCamelCase : Optional[Any]=800 , **UpperCamelCase : str , ):
'''simple docstring'''
lowercase__ = vocab_size
lowercase__ = max_position_embeddings
lowercase__ = d_model
lowercase__ = encoder_ffn_dim
lowercase__ = encoder_layers
lowercase__ = encoder_attention_heads
lowercase__ = decoder_ffn_dim
lowercase__ = decoder_layers
lowercase__ = decoder_attention_heads
lowercase__ = dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = activation_function
lowercase__ = init_std
lowercase__ = encoder_layerdrop
lowercase__ = decoder_layerdrop
lowercase__ = classifier_dropout
lowercase__ = use_cache
lowercase__ = encoder_layers
lowercase__ = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase__ = use_prompt
lowercase__ = prompt_length
lowercase__ = prompt_mid_dim
super().__init__(
pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , is_encoder_decoder=UpperCamelCase , decoder_start_token_id=UpperCamelCase , forced_eos_token_id=UpperCamelCase , **UpperCamelCase , )
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , UpperCamelCase ):
lowercase__ = self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
'''The config can simply be saved and uploaded again to be fixed.''' )
| 2 | 1 |
"""simple docstring"""
import math
def solution( n = 100 ) ->int:
    sum_of_squares = sum(i * i for i in range(1 , n + 1 ) )
    square_of_sum = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
    return square_of_sum - sum_of_squares
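# Closed-form note: sum(1..n) = n(n + 1) / 2 and the sum of squares is
# n(n + 1)(2n + 1) / 6, so the difference is computable in O(1); for n = 100
# it equals 25164150.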
if __name__ == "__main__":
print(f"{solution() = }")
| 371 | """simple docstring"""
import math
def res( x , y ) ->float:
if 0 not in (x, y):
# We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError('This should never happen' )
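# Why the log comparison works: log10 is strictly increasing on positive reals,
# so x1**y1 > x2**y2 exactly when y1 * log10(x1) > y2 * log10(x2). For example,
# comparing 2**100 with 10**30: 100 * log10(2) is roughly 30.10, which exceeds
# 30, so 2**100 is the larger number.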
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
    prompt = 'Enter the base and the power separated by a comma: '
    x1, y1 = map(int, input(prompt).split(','))
    x2, y2 = map(int, input(prompt).split(','))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)
    # We check for the largest number
    if res1 > res2:
        print('Largest number is', x1, '^', y1)
    elif res2 > res1:
        print('Largest number is', x2, '^', y2)
    else:
        print('Both are equal')
| 203 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase ( A__ ):
"""simple docstring"""
_a = ['''image_processor''', '''tokenizer''']
_a = '''Pix2StructImageProcessor'''
_a = ('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = False
super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __call__( self , UpperCamelCase_=None , UpperCamelCase_ = None , UpperCamelCase_ = True , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = 2048 , UpperCamelCase_ = 0 , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = True , UpperCamelCase_ = None , **UpperCamelCase_ , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None and not self.image_processor.is_vqa:
UpperCamelCase__ :Tuple = self.tokenizer
UpperCamelCase__ :Dict = self.tokenizer(
text=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , return_overflowing_tokens=SCREAMING_SNAKE_CASE__ , return_special_tokens_mask=SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ , return_length=SCREAMING_SNAKE_CASE__ , verbose=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
UpperCamelCase__ :List[Any] = self.image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , max_patches=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
else:
# add pixel_values and bbox
UpperCamelCase__ :List[Any] = self.image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , max_patches=SCREAMING_SNAKE_CASE__ , header_text=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if text is not None and not self.image_processor.is_vqa:
UpperCamelCase__ :List[Any] = self.tokenizer(
text=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , return_overflowing_tokens=SCREAMING_SNAKE_CASE__ , return_special_tokens_mask=SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ , return_length=SCREAMING_SNAKE_CASE__ , verbose=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
if "attention_mask" in text_encoding:
UpperCamelCase__ :List[Any] = text_encoding.pop('''attention_mask''' )
if "input_ids" in text_encoding:
UpperCamelCase__ :Dict = text_encoding.pop('''input_ids''' )
else:
UpperCamelCase__ :Optional[int] = None
if text_encoding is not None:
encoding_image_processor.update(SCREAMING_SNAKE_CASE__ )
return encoding_image_processor
def lowerCAmelCase__ ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def lowerCAmelCase__ ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
'''simple docstring'''
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = self.tokenizer.model_input_names
UpperCamelCase__ :int = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 97 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(self, images=None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, max_patches: Optional[int] = 2048, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, max_patches=max_patches, **kwargs)
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs)

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
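

# A dependency-free sketch of the merge performed at the end of __call__
# above: tokenizer output is renamed to decoder_* keys and folded into the
# image-processor encoding so a single mapping is returned. `merge_encodings`
# is a hypothetical helper written for illustration only.
def merge_encodings(encoding_image_processor: dict, text_encoding: dict) -> dict:
    merged = dict(encoding_image_processor)
    if text_encoding is not None:
        if "attention_mask" in text_encoding:
            merged["decoder_attention_mask"] = text_encoding.pop("attention_mask")
        if "input_ids" in text_encoding:
            merged["decoder_input_ids"] = text_encoding.pop("input_ids")
    return merged


print(merge_encodings({"flattened_patches": [[0.1, 0.2]]}, {"input_ids": [[5, 6]], "attention_mask": [[1, 1]]}))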
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
        GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = tempfile.mkdtemp()
UpperCamelCase = BlipImageProcessor()
        UpperCamelCase = GPT2Tokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' )
UpperCamelCase = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert' )
UpperCamelCase = InstructBlipProcessor(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
processor.save_pretrained(self.tmpdirname )
def A ( self : List[Any] , **UpperCamelCase__ : int ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ).tokenizer
def A ( self : Dict , **UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ).image_processor
def A ( self : int , **UpperCamelCase__ : Any ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ).qformer_tokenizer
def A ( self : List[str] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def A ( self : Dict ):
"""simple docstring"""
        UpperCamelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
UpperCamelCase = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
UpperCamelCase = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0 )
UpperCamelCase = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
self.assertIsInstance(processor.qformer_tokenizer , UpperCamelCase__ )
def A ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_qformer_tokenizer()
UpperCamelCase = InstructBlipProcessor(
tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ )
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = image_processor(UpperCamelCase__ , return_tensors='np' )
UpperCamelCase = processor(images=UpperCamelCase__ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def A ( self : int ):
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_qformer_tokenizer()
UpperCamelCase = InstructBlipProcessor(
tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ )
UpperCamelCase = 'lower newer'
UpperCamelCase = processor(text=UpperCamelCase__ )
UpperCamelCase = tokenizer(UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ )
UpperCamelCase = qformer_tokenizer(UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['qformer_' + key] )
def A ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_qformer_tokenizer()
UpperCamelCase = InstructBlipProcessor(
tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ )
UpperCamelCase = 'lower newer'
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__ ):
processor()
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_qformer_tokenizer()
UpperCamelCase = InstructBlipProcessor(
tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ )
UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase = processor.batch_decode(UpperCamelCase__ )
UpperCamelCase = tokenizer.batch_decode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_qformer_tokenizer()
UpperCamelCase = InstructBlipProcessor(
tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ )
UpperCamelCase = 'lower newer'
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
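

# The "qformer_" key prefixing the assertions above depend on, reduced to a
# minimal standalone function (hypothetical helper, for illustration):
# features from a second tokenizer are merged under prefixed keys so both
# encodings can coexist in one dict.
def combine_encodings(text_encoding: dict, qformer_text_encoding: dict) -> dict:
    encoding = dict(text_encoding)
    for key, value in qformer_text_encoding.items():
        encoding["qformer_" + key] = value
    return encoding


print(combine_encodings({"input_ids": [1, 2]}, {"input_ids": [3, 4]}))
# -> {'input_ids': [1, 2], 'qformer_input_ids': [3, 4]}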
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
def __init__( self : Optional[Any] , *UpperCamelCase__ : int , **UpperCamelCase__ : List[str] ):
"""simple docstring"""
warnings.warn(
'The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use DeformableDetrImageProcessor instead.' , FutureWarning , )
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
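

# The deprecation-shim pattern used above, in a self-contained toy (all names
# here are hypothetical): the old class keeps working but emits a
# FutureWarning on construction before delegating to the replacement.
import warnings


class NewImageProcessor:
    def __init__(self, scale: float = 1.0):
        self.scale = scale


class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class OldFeatureExtractor is deprecated. Please use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)


OldFeatureExtractor(scale=2.0)  # still constructs, but warns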
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class snake_case_ ( Seq2SeqTrainer ):
def __init__( self : List[Any] , *lowercase_ : List[Any] , lowercase_ : Union[str, Any]=None , lowercase_ : Tuple=None , **lowercase_ : str ) -> Union[str, Any]:
super().__init__(*lowercase_ , **lowercase_ )
lowercase__ : Tuple = eval_examples
lowercase__ : Optional[int] = post_process_function
def __UpperCamelCase ( self : List[Any] , lowercase_ : Optional[Dataset] = None , lowercase_ : Any=None , lowercase_ : Optional[List[str]] = None , lowercase_ : str = "eval" , **lowercase_ : Any , ) -> Dict[str, float]:
lowercase__ : List[Any] = gen_kwargs.copy()
lowercase__ : Optional[int] = (
gen_kwargs["max_length"] if gen_kwargs.get("max_length" ) is not None else self.args.generation_max_length
)
lowercase__ : List[str] = (
gen_kwargs["num_beams"] if gen_kwargs.get("num_beams" ) is not None else self.args.generation_num_beams
)
lowercase__ : Tuple = gen_kwargs
lowercase__ : Tuple = self.eval_dataset if eval_dataset is None else eval_dataset
lowercase__ : str = self.get_eval_dataloader(lowercase_ )
lowercase__ : Any = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowercase__ : List[str] = self.compute_metrics
lowercase__ : str = None
lowercase__ : Any = time.time()
lowercase__ : List[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase__ : Optional[int] = eval_loop(
lowercase_ , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
lowercase__ : int = compute_metrics
lowercase__ : Tuple = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowercase__ : Optional[int] = self.post_process_function(lowercase_ , lowercase_ , lowercase_ )
lowercase__ : str = self.compute_metrics(lowercase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
lowercase__ : List[str] = metrics.pop(lowercase_ )
metrics.update(output.metrics )
else:
lowercase__ : Union[str, Any] = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(lowercase_ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowercase__ : List[str] = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase_ )
return metrics
def __UpperCamelCase ( self : str , lowercase_ : str , lowercase_ : List[Any] , lowercase_ : Optional[int]=None , lowercase_ : str = "test" , **lowercase_ : Any ) -> Tuple:
lowercase__ : Any = gen_kwargs.copy()
lowercase__ : Tuple = self.get_test_dataloader(lowercase_ )
# Temporarily disable metric computation, we will do it in the loop here.
lowercase__ : List[Any] = self.compute_metrics
lowercase__ : Optional[Any] = None
lowercase__ : Any = time.time()
lowercase__ : Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase__ : Dict = eval_loop(
lowercase_ , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
lowercase__ : Optional[int] = compute_metrics
lowercase__ : Union[str, Any] = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowercase__ : int = self.post_process_function(lowercase_ , lowercase_ , lowercase_ , "predict" )
lowercase__ : List[Any] = self.compute_metrics(lowercase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
lowercase__ : Tuple = metrics.pop(lowercase_ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase_ )
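

# A small stand-in for the throughput accounting done via `speed_metrics`
# above (this helper is a simplified sketch, not the transformers
# implementation): runtime is measured from a start timestamp and converted
# into samples/sec and steps/sec under a metric prefix.
import math
import time


def speed_metrics_sketch(prefix, start_time, num_samples, num_steps):
    runtime = time.time() - start_time
    return {
        f"{prefix}_runtime": round(runtime, 4),
        f"{prefix}_samples_per_second": round(num_samples / runtime, 3),
        f"{prefix}_steps_per_second": round(num_steps / runtime, 3),
    }


_start = time.time()
time.sleep(0.01)  # stand-in for an evaluation loop
print(speed_metrics_sketch("eval", _start, num_samples=32, num_steps=math.ceil(32 / 8)))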
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCamelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class snake_case_ ( BaseImageProcessor ):
__A : str = ["pixel_values"]
def __init__( self : int , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 2_55 , lowercase_ : bool = True , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : bool = True , **lowercase_ : Union[str, Any] , ) -> None:
super().__init__(**lowercase_ )
lowercase__ : Tuple = size if size is not None else {"shortest_edge": 2_24}
lowercase__ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_ )
lowercase__ : List[str] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
lowercase__ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_ , param_name="crop_size" )
lowercase__ : Dict = do_resize
lowercase__ : List[Any] = size
lowercase__ : int = resample
lowercase__ : Union[str, Any] = do_center_crop
lowercase__ : Optional[int] = crop_size
lowercase__ : List[str] = do_rescale
lowercase__ : int = rescale_factor
lowercase__ : List[Any] = do_normalize
lowercase__ : Union[str, Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowercase__ : str = image_std if image_std is not None else OPENAI_CLIP_STD
lowercase__ : Dict = do_convert_rgb
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Union[str, Any] , ) -> np.ndarray:
lowercase__ : str = get_size_dict(lowercase_ , default_to_square=lowercase_ )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
lowercase__ : Dict = get_resize_output_image_size(lowercase_ , size=size["shortest_edge"] , default_to_square=lowercase_ )
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def __UpperCamelCase ( self : int , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : int , ) -> np.ndarray:
lowercase__ : Optional[Any] = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(lowercase_ , size=(size["height"], size["width"]) , data_format=lowercase_ , **lowercase_ )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[Any] , ) -> Any:
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
def __UpperCamelCase ( self : str , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : str , ) -> np.ndarray:
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : int = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : bool = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , **lowercase_ : Union[str, Any] , ) -> PIL.Image.Image:
lowercase__ : int = do_resize if do_resize is not None else self.do_resize
lowercase__ : Dict = size if size is not None else self.size
lowercase__ : List[Any] = get_size_dict(lowercase_ , param_name="size" , default_to_square=lowercase_ )
lowercase__ : Dict = resample if resample is not None else self.resample
lowercase__ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase__ : Dict = crop_size if crop_size is not None else self.crop_size
lowercase__ : List[str] = get_size_dict(lowercase_ , param_name="crop_size" , default_to_square=lowercase_ )
lowercase__ : int = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ : int = image_mean if image_mean is not None else self.image_mean
lowercase__ : List[str] = image_std if image_std is not None else self.image_std
lowercase__ : Union[str, Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowercase__ : Union[str, Any] = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowercase__ : Dict = [convert_to_rgb(lowercase_ ) for image in images]
# All transformations expect numpy arrays.
lowercase__ : Optional[Any] = [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
lowercase__ : List[Any] = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
if do_center_crop:
lowercase__ : int = [self.center_crop(image=lowercase_ , size=lowercase_ ) for image in images]
if do_rescale:
lowercase__ : str = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]
if do_normalize:
lowercase__ : Optional[int] = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images]
lowercase__ : Optional[Any] = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
lowercase__ : List[str] = {"pixel_values": images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
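

# The numeric core of the preprocessing pipeline above, condensed into plain
# NumPy (the mean/std constants below are the well-known OPENAI_CLIP_* values
# the processor imports; the random input is just a placeholder image):
import numpy as np

clip_mean = np.array([0.48145466, 0.4578275, 0.40821073])
clip_std = np.array([0.26862954, 0.26130258, 0.27577711])

image = np.random.randint(0, 256, size=(224, 224, 3)).astype(np.float32)
image = image * (1 / 255)                      # rescale to [0, 1]
image = (image - clip_mean) / clip_std         # channel-wise normalize
image = np.moveaxis(image, -1, 0)              # HWC -> CHW (ChannelDimension.FIRST)
print(image.shape)  # (3, 224, 224)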
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
"processing_mgp_str": ["MgpstrProcessor"],
"tokenization_mgp_str": ["MgpstrTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
"MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
"MgpstrModel",
"MgpstrPreTrainedModel",
"MgpstrForSceneTextRecognition",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
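

# A toy re-implementation of the lazy-import pattern wired up above
# (simplified sketch, not the transformers _LazyModule): attribute access
# triggers the real import the first time the attribute is needed.
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)


lazy_math = TinyLazyModule("lazy_math", {"math": ["sqrt", "pi"]})
print(lazy_math.sqrt(9.0))  # the `math` module is only imported here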
'''simple docstring'''
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
def get_masked_lm_array(lowerCAmelCase ):
_lowerCAmelCase = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
_lowerCAmelCase = tf.train.load_variable(lowerCAmelCase , lowerCAmelCase )
if "kernel" in name:
_lowerCAmelCase = array.transpose()
return torch.from_numpy(lowerCAmelCase )
def get_encoder_array(lowerCAmelCase ):
_lowerCAmelCase = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
_lowerCAmelCase = tf.train.load_variable(lowerCAmelCase , lowerCAmelCase )
if "kernel" in name:
_lowerCAmelCase = array.transpose()
return torch.from_numpy(lowerCAmelCase )
def get_encoder_layer_array(lowerCAmelCase , lowerCAmelCase ):
_lowerCAmelCase = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
_lowerCAmelCase = tf.train.load_variable(lowerCAmelCase , lowerCAmelCase )
if "kernel" in name:
_lowerCAmelCase = array.transpose()
return torch.from_numpy(lowerCAmelCase )
def get_encoder_attention_layer_array(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
_lowerCAmelCase = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
_lowerCAmelCase = tf.train.load_variable(lowerCAmelCase , lowerCAmelCase )
_lowerCAmelCase = array.reshape(lowerCAmelCase )
if "kernel" in name:
_lowerCAmelCase = array.transpose()
return torch.from_numpy(lowerCAmelCase )
print(f"Loading model based on config from {config_path}..." )
_lowerCAmelCase = BertConfig.from_json_file(lowerCAmelCase )
_lowerCAmelCase = BertForMaskedLM(lowerCAmelCase )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
_lowerCAmelCase = model.bert.encoder.layer[layer_index]
# Self-attention
_lowerCAmelCase = layer.attention.self
_lowerCAmelCase = get_encoder_attention_layer_array(
lowerCAmelCase , """_query_dense/kernel""" , self_attn.query.weight.data.shape )
_lowerCAmelCase = get_encoder_attention_layer_array(
lowerCAmelCase , """_query_dense/bias""" , self_attn.query.bias.data.shape )
_lowerCAmelCase = get_encoder_attention_layer_array(
lowerCAmelCase , """_key_dense/kernel""" , self_attn.key.weight.data.shape )
_lowerCAmelCase = get_encoder_attention_layer_array(
lowerCAmelCase , """_key_dense/bias""" , self_attn.key.bias.data.shape )
_lowerCAmelCase = get_encoder_attention_layer_array(
lowerCAmelCase , """_value_dense/kernel""" , self_attn.value.weight.data.shape )
_lowerCAmelCase = get_encoder_attention_layer_array(
lowerCAmelCase , """_value_dense/bias""" , self_attn.value.bias.data.shape )
# Self-attention Output
_lowerCAmelCase = layer.attention.output
_lowerCAmelCase = get_encoder_attention_layer_array(
lowerCAmelCase , """_output_dense/kernel""" , self_output.dense.weight.data.shape )
_lowerCAmelCase = get_encoder_attention_layer_array(
lowerCAmelCase , """_output_dense/bias""" , self_output.dense.bias.data.shape )
_lowerCAmelCase = get_encoder_layer_array(lowerCAmelCase , """_attention_layer_norm/gamma""" )
_lowerCAmelCase = get_encoder_layer_array(lowerCAmelCase , """_attention_layer_norm/beta""" )
# Intermediate
_lowerCAmelCase = layer.intermediate
_lowerCAmelCase = get_encoder_layer_array(lowerCAmelCase , """_intermediate_dense/kernel""" )
_lowerCAmelCase = get_encoder_layer_array(lowerCAmelCase , """_intermediate_dense/bias""" )
# Output
_lowerCAmelCase = layer.output
_lowerCAmelCase = get_encoder_layer_array(lowerCAmelCase , """_output_dense/kernel""" )
_lowerCAmelCase = get_encoder_layer_array(lowerCAmelCase , """_output_dense/bias""" )
_lowerCAmelCase = get_encoder_layer_array(lowerCAmelCase , """_output_layer_norm/gamma""" )
_lowerCAmelCase = get_encoder_layer_array(lowerCAmelCase , """_output_layer_norm/beta""" )
# Embeddings
_lowerCAmelCase = get_encoder_array("""_position_embedding_layer/embeddings""" )
_lowerCAmelCase = get_encoder_array("""_type_embedding_layer/embeddings""" )
_lowerCAmelCase = get_encoder_array("""_embedding_norm_layer/gamma""" )
_lowerCAmelCase = get_encoder_array("""_embedding_norm_layer/beta""" )
# LM Head
_lowerCAmelCase = model.cls.predictions.transform
_lowerCAmelCase = get_masked_lm_array("""dense/kernel""" )
_lowerCAmelCase = get_masked_lm_array("""dense/bias""" )
_lowerCAmelCase = get_masked_lm_array("""layer_norm/gamma""" )
_lowerCAmelCase = get_masked_lm_array("""layer_norm/beta""" )
_lowerCAmelCase = get_masked_lm_array("""embedding_table""" )
# Pooling
_lowerCAmelCase = BertPooler(config=lowerCAmelCase )
_lowerCAmelCase = get_encoder_array("""_pooler_layer/kernel""" )
_lowerCAmelCase = get_encoder_array("""_pooler_layer/bias""" )
# Export final model
model.save_pretrained(lowerCAmelCase )
# Integration test - should load without any errors ;)
_lowerCAmelCase = BertForMaskedLM.from_pretrained(lowerCAmelCase )
print(new_model.eval() )
print("""Model conversion was done sucessfully!""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow Token Dropping checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
    args = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
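
# Why the helpers above transpose every "kernel": TensorFlow dense kernels are
# stored as (in_features, out_features), while torch.nn.Linear.weight is
# (out_features, in_features). A minimal check of the convention (standalone
# sketch with made-up sizes):
import numpy as np
import torch

tf_kernel = np.random.randn(768, 64).astype(np.float32)  # (in, out) in TF
linear = torch.nn.Linear(768, 64)
with torch.no_grad():
    linear.weight.copy_(torch.from_numpy(tf_kernel.transpose()))  # (out, in)

x = torch.randn(1, 768)
expected = torch.from_numpy(x.numpy() @ tf_kernel) + linear.bias
assert torch.allclose(linear(x), expected, atol=1e-5)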
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
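

# The dynamic-axes mapping built by the ONNX config above, as a standalone
# function (illustrative sketch): every model input shares the same axis
# names so batch size and sequence length stay symbolic in the exported graph.
from collections import OrderedDict


def onnx_input_axes(task):
    if task == "multiple-choice":
        dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        dynamic_axis = {0: "batch", 1: "sequence"}
    return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])


print(onnx_input_axes("sequence-classification"))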
def naive_cut_rod_recursive(n: int, prices: list):
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
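
    # Cross-check the three strategies on a second, hand-picked price table
    # (the classic CLRS example; for n = 8 the optimum is a 2 + 6 split worth 22):
    clrs_prices = [1, 5, 8, 9, 10, 17, 17, 20]
    for rod_length in range(len(clrs_prices) + 1):
        assert (
            naive_cut_rod_recursive(rod_length, clrs_prices)
            == top_down_cut_rod(rod_length, clrs_prices)
            == bottom_up_cut_rod(rod_length, clrs_prices)
        )
    print(bottom_up_cut_rod(8, clrs_prices))  # 22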
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
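

# One Euler-Maruyama reverse-SDE step like `step_pred` above, stripped of the
# scheduler plumbing (beta range, step count and the stand-in score are
# illustrative assumptions, not trained values):
import math
import torch

beta_min, beta_max, num_steps = 0.1, 20.0, 1000
x = torch.randn(2, 4)
score = -x  # stand-in for a score model's (already std-scaled) output
t = 0.5
dt = -1.0 / num_steps
beta_t = beta_min + t * (beta_max - beta_min)
drift = -0.5 * beta_t * x - beta_t * score  # drift - diffusion**2 * score
diffusion = math.sqrt(beta_t)
x_mean = x + drift * dt
x = x_mean + diffusion * math.sqrt(-dt) * torch.randn_like(x)
print(x.shape)  # torch.Size([2, 4])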
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
snake_case__ = DiTPipeline
snake_case__ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
snake_case__ = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
snake_case__ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
snake_case__ = False
def lowerCamelCase__ ( self : Tuple ):
torch.manual_seed(0 )
        __lowerCamelCase : Optional[Any] = Transformer2DModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=UpperCAmelCase , activation_fn="gelu-approximate" , num_embeds_ada_norm=1000 , norm_type="ada_norm_zero" , norm_elementwise_affine=UpperCAmelCase , )
__lowerCamelCase : List[str] = AutoencoderKL()
__lowerCamelCase : List[Any] = DDIMScheduler()
__lowerCamelCase : Optional[Any] = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
def lowerCamelCase__ ( self : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any]=0 ):
if str(UpperCAmelCase ).startswith("mps" ):
__lowerCamelCase : List[str] = torch.manual_seed(UpperCAmelCase )
else:
__lowerCamelCase : List[str] = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
__lowerCamelCase : str = {
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def lowerCamelCase__ ( self : Optional[Any] ):
__lowerCamelCase : Dict = "cpu"
__lowerCamelCase : int = self.get_dummy_components()
__lowerCamelCase : Optional[Any] = self.pipeline_class(**UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
__lowerCamelCase : Optional[int] = self.get_dummy_inputs(UpperCAmelCase )
__lowerCamelCase : List[Any] = pipe(**UpperCAmelCase ).images
__lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
        __lowerCamelCase : Optional[int] = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
__lowerCamelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCAmelCase , 1E-3 )
def lowerCamelCase__ ( self : Any ):
self._test_inference_batch_single_identical(relax_max_difference=UpperCAmelCase , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowerCamelCase__ ( self : List[str] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
def lowerCamelCase__ ( self : Any ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self : List[str] ):
__lowerCamelCase : Optional[int] = torch.manual_seed(0 )
__lowerCamelCase : str = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
pipe.to("cuda" )
__lowerCamelCase : Tuple = ["vase", "umbrella", "white shark", "white wolf"]
__lowerCamelCase : Optional[int] = pipe.get_label_ids(UpperCAmelCase )
__lowerCamelCase : Optional[Any] = pipe(UpperCAmelCase , generator=UpperCAmelCase , num_inference_steps=40 , output_type="np" ).images
for word, image in zip(UpperCAmelCase , UpperCAmelCase ):
__lowerCamelCase : Dict = load_numpy(
F"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-2
def lowerCamelCase__ ( self : str ):
__lowerCamelCase : Tuple = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" )
__lowerCamelCase : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("cuda" )
__lowerCamelCase : Union[str, Any] = ["vase", "umbrella"]
__lowerCamelCase : int = pipe.get_label_ids(UpperCAmelCase )
__lowerCamelCase : Dict = torch.manual_seed(0 )
__lowerCamelCase : Dict = pipe(UpperCAmelCase , generator=UpperCAmelCase , num_inference_steps=25 , output_type="np" ).images
for word, image in zip(UpperCAmelCase , UpperCAmelCase ):
__lowerCamelCase : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
F"""/dit/{word}_512.npy""" )
            assert np.abs((expected_image - image).max() ) < 1E-1
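

# The slice comparison used throughout the fast tests above, in isolation:
# grab the bottom-right 3x3 patch of the last channel and compare it to a
# stored reference within an absolute tolerance (zeros here are placeholders
# for real reference values).
import numpy as np

image = np.zeros((1, 16, 16, 3), dtype=np.float32)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.zeros(9, dtype=np.float32)
max_diff = np.abs(image_slice.flatten() - expected_slice).max()
assert max_diff <= 1e-3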
'''simple docstring'''
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
lowercase_ = re.compile(R"""\b(a|an|the)\b""", re.UNICODE)
lowercase_ = None
def lowerCamelCase ( ) ->Union[str, Any]:
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser("""Official evaluation script for SQuAD version 2.0.""" )
parser.add_argument("""data_file""" , metavar="""data.json""" , help="""Input data JSON file.""" )
parser.add_argument("""pred_file""" , metavar="""pred.json""" , help="""Model predictions.""" )
parser.add_argument(
"""--out-file""" , """-o""" , metavar="""eval.json""" , help="""Write accuracy metrics to file (default is stdout).""" )
parser.add_argument(
"""--na-prob-file""" , """-n""" , metavar="""na_prob.json""" , help="""Model estimates of probability of no answer.""" )
parser.add_argument(
"""--na-prob-thresh""" , """-t""" , type=__lowerCamelCase , default=1.0 , help="""Predict \"\" if no-answer probability exceeds this (default = 1.0).""" , )
parser.add_argument(
"""--out-image-dir""" , """-p""" , metavar="""out_images""" , default=__lowerCamelCase , help="""Save precision-recall curves to directory.""" )
parser.add_argument("""--verbose""" , """-v""" , action="""store_true""" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def lowerCamelCase ( __lowerCamelCase : str ) ->Optional[Any]:
_SCREAMING_SNAKE_CASE = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
_SCREAMING_SNAKE_CASE = bool(qa["""answers"""]["""text"""] )
return qid_to_has_ans
def lowerCamelCase ( __lowerCamelCase : Dict ) ->Tuple:
def remove_articles(__lowerCamelCase : Optional[int] ):
return ARTICLES_REGEX.sub(""" """ , __lowerCamelCase )
def white_space_fix(__lowerCamelCase : List[str] ):
return " ".join(text.split() )
def remove_punc(__lowerCamelCase : Tuple ):
_SCREAMING_SNAKE_CASE = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__lowerCamelCase : Tuple ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__lowerCamelCase ) ) ) )
def lowerCamelCase ( __lowerCamelCase : Optional[Any] ) ->Any:
if not s:
return []
return normalize_answer(__lowerCamelCase ).split()
def lowerCamelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] ) ->str:
return int(normalize_answer(__lowerCamelCase ) == normalize_answer(__lowerCamelCase ) )
def lowerCamelCase ( __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] ) ->Optional[int]:
_SCREAMING_SNAKE_CASE = get_tokens(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = get_tokens(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = collections.Counter(__lowerCamelCase ) & collections.Counter(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = sum(common.values() )
if len(__lowerCamelCase ) == 0 or len(__lowerCamelCase ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
_SCREAMING_SNAKE_CASE = 1.0 * num_same / len(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = 1.0 * num_same / len(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = (2 * precision * recall) / (precision + recall)
return fa
def lowerCamelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : Dict ) ->Dict:
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
_SCREAMING_SNAKE_CASE = qa["id"]
_SCREAMING_SNAKE_CASE = [t for t in qa["answers"]["text"] if normalize_answer(__lowerCamelCase )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
_SCREAMING_SNAKE_CASE = [""]
if qid not in preds:
print(F'Missing prediction for {qid}' )
continue
_SCREAMING_SNAKE_CASE = preds[qid]
# Take max over all gold answers
_SCREAMING_SNAKE_CASE = max(compute_exact(__lowerCamelCase , __lowerCamelCase ) for a in gold_answers )
_SCREAMING_SNAKE_CASE = max(compute_fa(__lowerCamelCase , __lowerCamelCase ) for a in gold_answers )
return exact_scores, fa_scores
def lowerCamelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : str ) ->int:
_SCREAMING_SNAKE_CASE = {}
for qid, s in scores.items():
_SCREAMING_SNAKE_CASE = na_probs[qid] > na_prob_thresh
if pred_na:
_SCREAMING_SNAKE_CASE = float(not qid_to_has_ans[qid] )
else:
_SCREAMING_SNAKE_CASE = s
return new_scores
def lowerCamelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any]=None ) ->Optional[int]:
if not qid_list:
_SCREAMING_SNAKE_CASE = len(__lowerCamelCase )
return collections.OrderedDict(
[
("""exact""", 100.0 * sum(exact_scores.values() ) / total),
("""f1""", 100.0 * sum(fa_scores.values() ) / total),
("""total""", total),
] )
else:
_SCREAMING_SNAKE_CASE = len(__lowerCamelCase )
return collections.OrderedDict(
[
("""exact""", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
("""f1""", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
("""total""", total),
] )
def lowerCamelCase ( __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : Optional[int] ) ->Optional[int]:
for k in new_eval:
_SCREAMING_SNAKE_CASE = new_eval[k]
def lowerCamelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] ) ->str:
plt.step(__lowerCamelCase , __lowerCamelCase , color="""b""" , alpha=0.2 , where="""post""" )
plt.fill_between(__lowerCamelCase , __lowerCamelCase , step="""post""" , alpha=0.2 , color="""b""" )
plt.xlabel("""Recall""" )
plt.ylabel("""Precision""" )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(__lowerCamelCase )
plt.savefig(__lowerCamelCase )
plt.clf()
def lowerCamelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : List[str]=None , __lowerCamelCase : List[Any]=None ) ->List[Any]:
_SCREAMING_SNAKE_CASE = sorted(__lowerCamelCase , key=lambda __lowerCamelCase : na_probs[k] )
_SCREAMING_SNAKE_CASE = 0.0
_SCREAMING_SNAKE_CASE = 1.0
_SCREAMING_SNAKE_CASE = 0.0
_SCREAMING_SNAKE_CASE = [1.0]
_SCREAMING_SNAKE_CASE = [0.0]
_SCREAMING_SNAKE_CASE = 0.0
for i, qid in enumerate(__lowerCamelCase ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
_SCREAMING_SNAKE_CASE = true_pos / float(i + 1 )
_SCREAMING_SNAKE_CASE = true_pos / float(__lowerCamelCase )
if i == len(__lowerCamelCase ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(__lowerCamelCase )
recalls.append(__lowerCamelCase )
if out_image:
plot_pr_curve(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return {"ap": 100.0 * avg_prec}
def lowerCamelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Dict ) ->Any:
if out_image_dir and not os.path.exists(__lowerCamelCase ):
os.makedirs(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
_SCREAMING_SNAKE_CASE = make_precision_recall_eval(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , out_image=os.path.join(__lowerCamelCase , """pr_exact.png""" ) , title="""Precision-Recall curve for Exact Match score""" , )
_SCREAMING_SNAKE_CASE = make_precision_recall_eval(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , out_image=os.path.join(__lowerCamelCase , """pr_f1.png""" ) , title="""Precision-Recall curve for F1 score""" , )
_SCREAMING_SNAKE_CASE = {k: float(__lowerCamelCase ) for k, v in qid_to_has_ans.items()}
_SCREAMING_SNAKE_CASE = make_precision_recall_eval(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , out_image=os.path.join(__lowerCamelCase , """pr_oracle.png""" ) , title="""Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)""" , )
merge_eval(__lowerCamelCase , __lowerCamelCase , """pr_exact""" )
merge_eval(__lowerCamelCase , __lowerCamelCase , """pr_f1""" )
merge_eval(__lowerCamelCase , __lowerCamelCase , """pr_oracle""" )
def lowerCamelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : Tuple ) ->Union[str, Any]:
if not qid_list:
return
_SCREAMING_SNAKE_CASE = [na_probs[k] for k in qid_list]
_SCREAMING_SNAKE_CASE = np.ones_like(__lowerCamelCase ) / float(len(__lowerCamelCase ) )
plt.hist(__lowerCamelCase , weights=__lowerCamelCase , bins=20 , range=(0.0, 1.0) )
plt.xlabel("""Model probability of no-answer""" )
plt.ylabel("""Proportion of dataset""" )
plt.title(F'Histogram of no-answer probability: {name}' )
plt.savefig(os.path.join(__lowerCamelCase , F'na_prob_hist_{name}.png' ) )
plt.clf()
def lowerCamelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : List[Any] ) ->Any:
_SCREAMING_SNAKE_CASE = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
_SCREAMING_SNAKE_CASE = num_no_ans
_SCREAMING_SNAKE_CASE = cur_score
_SCREAMING_SNAKE_CASE = 0.0
_SCREAMING_SNAKE_CASE = sorted(__lowerCamelCase , key=lambda __lowerCamelCase : na_probs[k] )
for i, qid in enumerate(__lowerCamelCase ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
_SCREAMING_SNAKE_CASE = scores[qid]
else:
if preds[qid]:
_SCREAMING_SNAKE_CASE = -1
else:
_SCREAMING_SNAKE_CASE = 0
cur_score += diff
if cur_score > best_score:
_SCREAMING_SNAKE_CASE = cur_score
_SCREAMING_SNAKE_CASE = na_probs[qid]
return 100.0 * best_score / len(__lowerCamelCase ), best_thresh
def lowerCamelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : int ) ->List[Any]:
_SCREAMING_SNAKE_CASE = find_best_thresh(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE = find_best_thresh(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE = best_exact
_SCREAMING_SNAKE_CASE = exact_thresh
_SCREAMING_SNAKE_CASE = best_fa
_SCREAMING_SNAKE_CASE = fa_thresh
def lowerCamelCase ( ) ->Optional[int]:
with open(OPTS.data_file ) as f:
_SCREAMING_SNAKE_CASE = json.load(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = dataset_json["data"]
with open(OPTS.pred_file ) as f:
_SCREAMING_SNAKE_CASE = json.load(__lowerCamelCase )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
_SCREAMING_SNAKE_CASE = json.load(__lowerCamelCase )
else:
_SCREAMING_SNAKE_CASE = {k: 0.0 for k in preds}
_SCREAMING_SNAKE_CASE = make_qid_to_has_ans(__lowerCamelCase ) # maps qid to True/False
_SCREAMING_SNAKE_CASE = [k for k, v in qid_to_has_ans.items() if v]
_SCREAMING_SNAKE_CASE = [k for k, v in qid_to_has_ans.items() if not v]
_SCREAMING_SNAKE_CASE = get_raw_scores(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE = apply_no_ans_threshold(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , OPTS.na_prob_thresh )
_SCREAMING_SNAKE_CASE = apply_no_ans_threshold(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , OPTS.na_prob_thresh )
_SCREAMING_SNAKE_CASE = make_eval_dict(__lowerCamelCase , __lowerCamelCase )
if has_ans_qids:
_SCREAMING_SNAKE_CASE = make_eval_dict(__lowerCamelCase , __lowerCamelCase , qid_list=__lowerCamelCase )
merge_eval(__lowerCamelCase , __lowerCamelCase , """HasAns""" )
if no_ans_qids:
_SCREAMING_SNAKE_CASE = make_eval_dict(__lowerCamelCase , __lowerCamelCase , qid_list=__lowerCamelCase )
merge_eval(__lowerCamelCase , __lowerCamelCase , """NoAns""" )
if OPTS.na_prob_file:
find_all_best_thresh(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , OPTS.out_image_dir )
histogram_na_prob(__lowerCamelCase , __lowerCamelCase , OPTS.out_image_dir , """hasAns""" )
histogram_na_prob(__lowerCamelCase , __lowerCamelCase , OPTS.out_image_dir , """noAns""" )
if OPTS.out_file:
with open(OPTS.out_file , """w""" ) as f:
json.dump(__lowerCamelCase , __lowerCamelCase )
else:
print(json.dumps(__lowerCamelCase , indent=2 ) )
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("""Agg""")
import matplotlib.pyplot as plt
main()
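

# The token-overlap F1 from compute_f1 above, distilled to one standalone
# function (answer normalization is omitted here for brevity):
import collections


def token_f1(gold: str, pred: str) -> float:
    gold_toks, pred_toks = gold.split(), pred.split()
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if not gold_toks or not pred_toks:
        return float(gold_toks == pred_toks)
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_toks)
    recall = num_same / len(gold_toks)
    return 2 * precision * recall / (precision + recall)


print(token_f1("the cat sat", "cat sat down"))  # 0.666...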
"""simple docstring"""
def min_path_sum(grid: list) -> int:
    """
    Find the path from top left to bottom right of an array of numbers with
    the lowest possible sum, and return the sum along this path.
    """
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]

    row_above = grid[0]
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])

    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
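
    # Worked example (grid values chosen by hand): the cheapest path is
    # 1 -> 3 -> 1 -> 1 -> 1, for a total of 7.
    print(min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))  # 7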
'''simple docstring'''
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Perform maxpooling on a 2D square matrix (image)."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1

        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Perform average pooling on a 2D square matrix (image)."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1

        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
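    # Sanity check on a small matrix before touching a real image (values
    # chosen by hand): 2x2 windows with stride 2.
    sample = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
    print(maxpooling(sample, size=2, stride=2))  # [[ 6.  8.] [14. 16.]]
    print(avgpooling(sample, size=2, stride=2))  # [[ 3.  5.] [11. 13.]]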
# Loading the image
    image = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset; each prompt is yielded n_copies times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generates commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` that checks whether all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of code containing one of the EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple completions for each task in the dataset and gather them across processes."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs
    )

    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
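# Hypothetical invocation (exact flag names come from HumanEvalArguments, defined in arguments.py):
#   accelerate launch human_eval.py --model_ckpt <checkpoint> --do_sample --n_samples 200 --batch_size 10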
if __name__ == "__main__":
main()
| 72 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
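# Minimal usage sketch (assumes network access to the Hub):
# tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
# tokenizer("Hello world")["input_ids"]  # -> [101, 7592, 2088, 102] with the uncased vocab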
| 46 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)

    return results
if __name__ == "__main__":
main()
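# Hypothetical invocation (file names and column index are placeholders):
#   python run_tf_text_classification.py --model_name_or_path bert-base-uncased \
#     --train_file train.csv --dev_file dev.csv --label_column_id 0 \
#     --output_dir ./out --do_train --do_eval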
| 249 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import itertools
import math
def is_prime(number: int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator():
    """simple docstring"""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def solution(nth: int = 10001) -> int:
    """simple docstring"""
    return next(itertools.islice(prime_generator(), nth - 1, nth))
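# For example, solution(6) == 13 (the sixth prime); the default solution() returns
# the 10001st prime, 104743 (Project Euler problem 7).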
if __name__ == "__main__":
print(F"{solution() = }") | 329 | 1 |
'''simple docstring'''
from __future__ import annotations

from collections import deque


class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    def __init__(self, number_of_queues: int, time_slices: list[int], queue: deque[Process], current_time: int) -> None:
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(self, ready_queue: deque[Process], time_slice: int) -> tuple[deque[Process], deque[Process]]:
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except the last one use round robin with their own time slice
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i]
            )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)

        return self.finish_queue
if __name__ == "__main__":
import doctest
    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})
    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print completion times of processes(P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes:\t{mlfq.calculate_sequence_of_finish_queue()}")
| 79 |
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """
    BigBirdForQuestionAnswering with a CLS head on top for predicting the category.
    This way we can load its weights with FlaxBigBirdForQuestionAnswering.
    """

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng


@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics


class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__, params=params, tx=tx, loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step, apply_fn=model.__call__, params=params, tx=tx, opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state

    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)

    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator


def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr


def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
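# Sketch of how these pieces combine (hand-written illustration; num_train_steps is a made-up value,
# the rest are the Args defaults above):
# tx, lr = build_tx(lr=3e-5, init_lr=0.0, warmup_steps=20000, num_train_steps=100000, weight_decay=0.0095)
# yields an AdamW optimizer whose learning rate warms up linearly to 3e-5 and then decays toward 1e-7,
# with weight decay masked out for biases and LayerNorm scales.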
| 220 | 0 |
import math
def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")
def encrypt_message(key: int, message: str) -> str:
    """simple docstring"""
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)
def decrypt_message(key: int, message: str) -> str:
    """simple docstring"""
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1

        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 355 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 279 | 0 |
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Convert an RGB image to grayscale."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Threshold a grayscale image to a binary mask."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Dilate a binary image with the given structuring element."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
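# Hand-traced example: dilating a single white pixel with the cross-shaped
# structuring element used below turns on its 4-neighbourhood as well:
# dilation(np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]]),
#          np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]))
# -> [[0, 1, 0], [1, 1, 1], [0, 1, 0]]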
if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
| 24 |
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]
def get_last_daily_ci_runs(token):
    """Get the last completed workflow run id of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the artifacts of the last completed workflow run of the scheduled (daily) CI."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )
def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the artifacts' content from the last completed workflow run of the scheduled (daily) CI."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
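# Hypothetical usage (artifact name is a placeholder; a GitHub token with read access is required):
# reports = get_last_daily_ci_reports(
#     artifact_names=["run_all_tests_gpu_test_reports"], output_dir="ci_artifacts", token=os.environ["GITHUB_TOKEN"]
# )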
| 64 | 0 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample
        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
| 134 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
_snake_case : int = "scheduler_config.json"
class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
@dataclass
class FlaxSchedulerOutput(BaseOutput):
    prev_sample: jnp.ndarray
class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Dict[str, Any] = None, subfolder: Optional[str] = None, return_unused_kwargs=False, **kwargs):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
assert len(__lowerCamelCase ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(__lowerCamelCase ) - x.ndim) ) , __lowerCamelCase )
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase=0.9_9_9 , __lowerCamelCase=jnp.float32 ):
def alpha_bar(__lowerCamelCase ):
return math.cos((time_step + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
__snake_case : List[Any] = []
for i in range(__lowerCamelCase ):
__snake_case : Dict = i / num_diffusion_timesteps
__snake_case : str = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(__lowerCamelCase ) / alpha_bar(__lowerCamelCase ) , __lowerCamelCase ) )
return jnp.array(__lowerCamelCase , dtype=__lowerCamelCase )
@flax.struct.dataclass
class a :
"""simple docstring"""
__UpperCAmelCase : jnp.ndarray
__UpperCAmelCase : jnp.ndarray
__UpperCAmelCase : jnp.ndarray
@classmethod
def __snake_case ( cls : Union[str, Any] , lowerCamelCase : int ) -> List[Any]:
__snake_case : Dict = scheduler.config
if config.trained_betas is not None:
__snake_case : Dict = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
__snake_case : Optional[int] = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__snake_case : str = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__snake_case : Optional[Any] = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
F'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' )
__snake_case : Any = 1.0 - betas
__snake_case : int = jnp.cumprod(lowerCamelCase , axis=0 )
return cls(
alphas=lowerCamelCase , betas=lowerCamelCase , alphas_cumprod=lowerCamelCase , )
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
__snake_case : List[Any] = state.alphas_cumprod
__snake_case : str = alphas_cumprod[timesteps] ** 0.5
__snake_case : Dict = sqrt_alpha_prod.flatten()
__snake_case : str = broadcast_to_shape_from_left(__lowerCamelCase , original_samples.shape )
__snake_case : Tuple = (1 - alphas_cumprod[timesteps]) ** 0.5
__snake_case : str = sqrt_one_minus_alpha_prod.flatten()
__snake_case : Tuple = broadcast_to_shape_from_left(__lowerCamelCase , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
__snake_case , __snake_case : Union[str, Any] = get_sqrt_alpha_prod(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
__snake_case : List[str] = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
__snake_case , __snake_case : Dict = get_sqrt_alpha_prod(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
__snake_case : Optional[int] = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
| 134 | 1 |
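A usage sketch for the Flax utilities in the row above, assuming the helpers keep the names their internal call sites use (`betas_for_alpha_bar`, `broadcast_to_shape_from_left`): build the squaredcos_cap_v2 schedule and apply the forward-diffusion identity x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise, which is exactly what the add-noise helper computes.

import jax
import jax.numpy as jnp

betas = betas_for_alpha_bar(1000)                  # cosine (Glide) schedule
alphas_cumprod = jnp.cumprod(1.0 - betas, axis=0)

x0 = jnp.ones((2, 3, 8, 8))
noise = jax.random.normal(jax.random.PRNGKey(0), x0.shape)
timesteps = jnp.array([10, 500])                   # one timestep per batch element

sqrt_ab = broadcast_to_shape_from_left(alphas_cumprod[timesteps] ** 0.5, x0.shape)
sqrt_1m_ab = broadcast_to_shape_from_left((1.0 - alphas_cumprod[timesteps]) ** 0.5, x0.shape)
x_t = sqrt_ab * x0 + sqrt_1m_ab * noise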
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def snake_case_ ( A_ : Optional[Any], A_ : Union[str, Any]="shi-labs/oneformer_demo" ):
'''simple docstring'''
with open(hf_hub_download(A_, A_, repo_type='''dataset''' ), '''r''' ) as f:
_lowerCamelCase : int = json.load(A_ )
_lowerCamelCase : Union[str, Any] = {}
_lowerCamelCase : Dict = []
_lowerCamelCase : List[Any] = []
for key, info in class_info.items():
_lowerCamelCase : Optional[int] = info['''name''']
class_names.append(info['''name'''] )
if info["isthing"]:
thing_ids.append(int(A_ ) )
_lowerCamelCase : Union[str, Any] = thing_ids
_lowerCamelCase : List[Any] = class_names
return metadata
class __snake_case ( unittest.TestCase):
def __init__( self : int , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any]=7 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Union[str, Any]=3_0 , __lowerCAmelCase : str=4_0_0 , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Any=[0.5, 0.5, 0.5] , __lowerCAmelCase : List[Any]=[0.5, 0.5, 0.5] , __lowerCAmelCase : Dict=1_0 , __lowerCAmelCase : int=False , __lowerCAmelCase : int=2_5_5 , __lowerCAmelCase : List[str]="shi-labs/oneformer_demo" , __lowerCAmelCase : str="ade20k_panoptic.json" , __lowerCAmelCase : List[Any]=1_0 , ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = parent
_lowerCamelCase : str = batch_size
_lowerCamelCase : Dict = num_channels
_lowerCamelCase : Union[str, Any] = min_resolution
_lowerCamelCase : str = max_resolution
_lowerCamelCase : Optional[Any] = do_resize
_lowerCamelCase : int = {'''shortest_edge''': 3_2, '''longest_edge''': 1_3_3_3} if size is None else size
_lowerCamelCase : List[Any] = do_normalize
_lowerCamelCase : Any = image_mean
_lowerCamelCase : List[str] = image_std
_lowerCamelCase : List[str] = class_info_file
_lowerCamelCase : Optional[Any] = prepare_metadata(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : str = num_text
_lowerCamelCase : Any = repo_path
# for the post_process_functions
_lowerCamelCase : Union[str, Any] = 2
_lowerCamelCase : str = 1_0
_lowerCamelCase : Tuple = 1_0
_lowerCamelCase : List[str] = 3
_lowerCamelCase : Any = 4
_lowerCamelCase : Tuple = num_labels
_lowerCamelCase : Tuple = do_reduce_labels
_lowerCamelCase : List[str] = ignore_index
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int]=False ):
"""simple docstring"""
if not batched:
_lowerCamelCase : Optional[Any] = image_inputs[0]
if isinstance(__lowerCAmelCase , Image.Image ):
_lowerCamelCase , _lowerCamelCase : List[Any] = image.size
else:
_lowerCamelCase , _lowerCamelCase : int = image.shape[1], image.shape[2]
if w < h:
_lowerCamelCase : str = int(self.size['''shortest_edge'''] * h / w )
_lowerCamelCase : Any = self.size['''shortest_edge''']
elif w > h:
_lowerCamelCase : Optional[int] = self.size['''shortest_edge''']
_lowerCamelCase : Tuple = int(self.size['''shortest_edge'''] * w / h )
else:
_lowerCamelCase : Optional[Any] = self.size['''shortest_edge''']
_lowerCamelCase : Optional[int] = self.size['''shortest_edge''']
else:
_lowerCamelCase : List[str] = []
for image in image_inputs:
_lowerCamelCase , _lowerCamelCase : str = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_lowerCamelCase : List[Any] = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[0] )[0]
_lowerCamelCase : List[Any] = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[1] )[1]
return expected_height, expected_width
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class __snake_case ( _lowercase , unittest.TestCase):
snake_case__ : Union[str, Any] = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
snake_case__ : Any = image_processing_class
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = OneFormerImageProcessorTester(self )
@property
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
return self.image_processing_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''image_std''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''ignore_index''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''class_info_file''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''num_text''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''repo_path''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''metadata''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_reduce_labels''' ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase : List[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , Image.Image )
# Test not batched input
_lowerCamelCase : Tuple = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase : str = self.image_processing_tester.get_expected_values(__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase , _lowerCamelCase : Tuple = self.image_processing_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = image_processor(
__lowerCAmelCase , ['''semantic'''] * len(__lowerCAmelCase ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase : Optional[int] = prepare_image_inputs(self.image_processing_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , np.ndarray )
# Test not batched input
_lowerCamelCase : Union[str, Any] = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase : Dict = self.image_processing_tester.get_expected_values(__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase , _lowerCamelCase : Any = self.image_processing_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
_lowerCamelCase : str = image_processor(
__lowerCAmelCase , ['''semantic'''] * len(__lowerCAmelCase ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , torch.Tensor )
# Test not batched input
_lowerCamelCase : Union[str, Any] = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase : List[Any] = self.image_processing_tester.get_expected_values(__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase , _lowerCamelCase : List[Any] = self.image_processing_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
_lowerCamelCase : List[str] = image_processor(
__lowerCAmelCase , ['''semantic'''] * len(__lowerCAmelCase ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : Dict="np" ):
"""simple docstring"""
_lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
_lowerCamelCase : int = self.image_processing_tester.num_labels
_lowerCamelCase : Optional[Any] = None
_lowerCamelCase : List[Any] = None
_lowerCamelCase : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=__lowerCAmelCase )
if with_segmentation_maps:
_lowerCamelCase : Optional[int] = num_labels
if is_instance_map:
_lowerCamelCase : List[Any] = list(range(__lowerCAmelCase ) ) * 2
_lowerCamelCase : int = dict(enumerate(__lowerCAmelCase ) )
_lowerCamelCase : List[str] = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uint8 ) for img in image_inputs
]
if segmentation_type == "pil":
_lowerCamelCase : List[str] = [Image.fromarray(__lowerCAmelCase ) for annotation in annotations]
_lowerCamelCase : Tuple = image_processor(
__lowerCAmelCase , ['''semantic'''] * len(__lowerCAmelCase ) , __lowerCAmelCase , return_tensors='''pt''' , instance_id_to_semantic_id=__lowerCAmelCase , pad_and_return_pixel_mask=__lowerCAmelCase , )
return inputs
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
def common(__lowerCAmelCase : Dict=False , __lowerCAmelCase : Any=None ):
_lowerCamelCase : Optional[Any] = self.comm_get_image_processor_inputs(
with_segmentation_maps=__lowerCAmelCase , is_instance_map=__lowerCAmelCase , segmentation_type=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = inputs['''mask_labels''']
_lowerCamelCase : Union[str, Any] = inputs['''class_labels''']
_lowerCamelCase : int = inputs['''pixel_values''']
_lowerCamelCase : Optional[int] = inputs['''text_inputs''']
# check the batch_size
for mask_label, class_label, text_input in zip(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensures padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(__lowerCAmelCase ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=True )
common(is_instance_map=False , segmentation_type='''pil''' )
common(is_instance_map=True , segmentation_type='''pil''' )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = np.zeros((2_0, 5_0) )
_lowerCamelCase : Dict = 1
_lowerCamelCase : Optional[int] = 1
_lowerCamelCase : int = 1
_lowerCamelCase : Optional[int] = binary_mask_to_rle(__lowerCAmelCase )
self.assertEqual(len(__lowerCAmelCase ) , 4 )
self.assertEqual(rle[0] , 2_1 )
self.assertEqual(rle[1] , 4_5 )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
_lowerCamelCase : str = self.image_processing_tester.get_fake_oneformer_outputs()
_lowerCamelCase : int = image_processor.post_process_semantic_segmentation(__lowerCAmelCase )
self.assertEqual(len(__lowerCAmelCase ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
_lowerCamelCase : List[str] = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
_lowerCamelCase : int = image_processor.post_process_semantic_segmentation(__lowerCAmelCase , target_sizes=__lowerCAmelCase )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : int = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
_lowerCamelCase : List[str] = self.image_processing_tester.get_fake_oneformer_outputs()
_lowerCamelCase : Optional[int] = image_processor.post_process_instance_segmentation(__lowerCAmelCase , threshold=0 )
self.assertTrue(len(__lowerCAmelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) , __lowerCAmelCase )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Any = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
_lowerCamelCase : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs()
_lowerCamelCase : Optional[Any] = image_processor.post_process_panoptic_segmentation(__lowerCAmelCase , threshold=0 )
self.assertTrue(len(__lowerCAmelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) , __lowerCAmelCase )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 72 |
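The RLE test in this row pins down the helper's output format: [start, length, start, length, ...] with 1-indexed starts over the row-major flattened mask. A hedged re-implementation consistent with those assertions (the real `binary_mask_to_rle` lives in transformers' OneFormer image-processing module; this sketch is not that exact code):

import numpy as np

def binary_mask_to_rle_sketch(mask):
    pixels = np.concatenate([[0], mask.flatten(), [0]])  # sentinel zeros at both ends
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1    # 1-indexed change points
    runs[1::2] -= runs[::2]                              # turn each end point into a run length
    return list(runs)

mask = np.zeros((20, 50))
mask[0, 20:] = 1
mask[1, :15] = 1  # merges with the run above in flattened order
rle = binary_mask_to_rle_sketch(mask)
assert rle[:2] == [21, 45]  # one run of 45 pixels starting at position 21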
"""simple docstring"""
def snake_case_ ( A_ : list[list[float]] ):
'''simple docstring'''
_lowerCamelCase : list[list[float]] = []
for data in source_data:
for i, el in enumerate(A_ ):
if len(A_ ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(A_ ) )
return data_lists
def snake_case_ ( A_ : list[list[float]], A_ : list[int] ):
'''simple docstring'''
_lowerCamelCase : list[list[float]] = []
for dlist, weight in zip(A_, A_ ):
_lowerCamelCase : Any = min(A_ )
_lowerCamelCase : Optional[Any] = max(A_ )
_lowerCamelCase : list[float] = []
# for weight 0, the score is 1 - the normalized value
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
_lowerCamelCase : str = F'''Invalid weight of {weight:f} provided'''
raise ValueError(A_ )
score_lists.append(A_ )
return score_lists
def snake_case_ ( A_ : list[list[float]] ):
'''simple docstring'''
_lowerCamelCase : list[float] = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(A_ ):
_lowerCamelCase : List[str] = final_scores[j] + ele
return final_scores
def snake_case_ ( A_ : list[list[float]], A_ : list[int] ):
'''simple docstring'''
_lowerCamelCase : Tuple = get_data(A_ )
_lowerCamelCase : Optional[Any] = calculate_each_score(A_, A_ )
_lowerCamelCase : str = generate_final_scores(A_ )
# append scores to source data
for i, ele in enumerate(A_ ):
source_data[i].append(A_ )
return source_data
| 72 | 1 |
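The four functions in the row above (all reduced to the same obfuscated name) normalize each data column to [0, 1], invert columns whose weight is 0, sum the per-column scores, and append the totals to the source rows. A self-contained sketch of the same pipeline with a worked example — the function name and sample data are mine, not the original's:

def weighted_scores(source_data, weights):
    # Weight 1 rewards high values, weight 0 rewards low values; the original
    # raises ValueError for any other weight (not reproduced here) and catches
    # ZeroDivisionError on constant columns, which the `or 1.0` fallback mirrors.
    finals = [0.0] * len(source_data)
    for col, w in zip(zip(*source_data), weights):
        lo, hi = min(col), max(col)
        span = (hi - lo) or 1.0
        for i, v in enumerate(col):
            norm = (v - lo) / span
            finals[i] += norm if w == 1 else 1.0 - norm
    return finals

print(weighted_scores([[20.0, 2.0], [25.0, 4.0], [30.0, 3.0]], [1, 0]))
# -> [1.0, 0.5, 1.5]: the third row is fastest and mid-cost, so it scores highest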
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = "rwkv"
__UpperCamelCase = {"max_position_embeddings": "context_length"}
def __init__( self : Union[str, Any] , lowercase_ : Any=50277 , lowercase_ : str=1024 , lowercase_ : List[str]=4096 , lowercase_ : Optional[Any]=32 , lowercase_ : Any=None , lowercase_ : Any=None , lowercase_ : List[Any]=1e-5 , lowercase_ : Union[str, Any]=0 , lowercase_ : Union[str, Any]=0 , lowercase_ : int=6 , lowercase_ : Tuple=False , lowercase_ : Any=True , **lowercase_ : Any , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE_ : Any = context_length
SCREAMING_SNAKE_CASE_ : int = hidden_size
SCREAMING_SNAKE_CASE_ : int = num_hidden_layers
SCREAMING_SNAKE_CASE_ : List[str] = attention_hidden_size if attention_hidden_size is not None else hidden_size
SCREAMING_SNAKE_CASE_ : int = intermediate_size if intermediate_size is not None else 4 * hidden_size
SCREAMING_SNAKE_CASE_ : int = layer_norm_epsilon
SCREAMING_SNAKE_CASE_ : Optional[int] = rescale_every
SCREAMING_SNAKE_CASE_ : Dict = use_cache
SCREAMING_SNAKE_CASE_ : Dict = bos_token_id
SCREAMING_SNAKE_CASE_ : Any = eos_token_id
super().__init__(
tie_word_embeddings=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_)
| 318 |
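A usage sketch for the config class above (it is the standard RWKV config with obfuscated names), showing the two defaulting rules encoded in its constructor; the top-level `RwkvConfig` import is an assumption about the surrounding library:

from transformers import RwkvConfig

config = RwkvConfig(hidden_size=1024, num_hidden_layers=32)
assert config.attention_hidden_size == 1024   # falls back to hidden_size
assert config.intermediate_size == 4 * 1024   # falls back to 4 * hidden_size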
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = "rwkv"
__UpperCamelCase = {"max_position_embeddings": "context_length"}
def __init__( self : Union[str, Any] , lowercase_ : Any=50277 , lowercase_ : str=1024 , lowercase_ : List[str]=4096 , lowercase_ : Optional[Any]=32 , lowercase_ : Any=None , lowercase_ : Any=None , lowercase_ : List[Any]=1e-5 , lowercase_ : Union[str, Any]=0 , lowercase_ : Union[str, Any]=0 , lowercase_ : int=6 , lowercase_ : Tuple=False , lowercase_ : Any=True , **lowercase_ : Any , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE_ : Any = context_length
SCREAMING_SNAKE_CASE_ : int = hidden_size
SCREAMING_SNAKE_CASE_ : int = num_hidden_layers
SCREAMING_SNAKE_CASE_ : List[str] = attention_hidden_size if attention_hidden_size is not None else hidden_size
SCREAMING_SNAKE_CASE_ : int = intermediate_size if intermediate_size is not None else 4 * hidden_size
SCREAMING_SNAKE_CASE_ : int = layer_norm_epsilon
SCREAMING_SNAKE_CASE_ : Optional[int] = rescale_every
SCREAMING_SNAKE_CASE_ : Dict = use_cache
SCREAMING_SNAKE_CASE_ : Dict = bos_token_id
SCREAMING_SNAKE_CASE_ : Any = eos_token_id
super().__init__(
tie_word_embeddings=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_)
| 318 | 1 |