import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()


metric = load("accuracy")


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)


class CustomCallback(TrainerCallback):
    """Also evaluate on the train split at each evaluation step, under the `train` metric prefix."""

    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy


def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()


if __name__ == "__main__":
    main()
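
# Example invocation (illustrative; the exact script file name is an assumption):
#   python train_complexity_predictor.py --model_ckpt microsoft/unixcoder-base-nine --num_epochs 5 --freeze True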
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the given string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$")
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
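    # A few extra illustrative checks (not part of the original file); the pattern
    # accepts 0 / 94 / +94 / 0094 prefixes followed by a valid mobile code 7x:
    print(is_sri_lankan_phone_number("+94773283048"))  # True
    print(is_sri_lankan_phone_number("0912343221"))  # False: 91 is not a mobile prefix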
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Compute (base ** exponent) % modulo_value by recursive squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Return the last `digits` digits of the power tower base ** base ** ... of the given height."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
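    # Sanity check (illustrative addition, not part of the original file):
    # _modexpt agrees with Python's built-in three-argument pow.
    assert _modexpt(1777, 1855, 10**8) == pow(1777, 1855, 10**8)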
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
def reverse_long_words(sentence: str) -> str:
    """
    Reverse all words that are longer than 4 characters in a sentence.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join("".join(word[::-1]) if len(word) > 4 else word for word in sentence.split())


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the unique paths from the top-left to the bottom-right of `grid` by backtracking; cells marked 1 are blocked."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
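    # A minimal usage sketch (not part of the original file): a 2x2 grid with
    # no blocked cells has exactly two simple paths from (0, 0) to (1, 1).
    print(depth_first_search([[0, 0], [0, 0]], 0, 0, set()))  # -> 2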
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Fall back to hidden_size-derived defaults when widths are not given explicitly
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
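
# Minimal usage sketch (illustrative, not part of the original module): when the
# attention and intermediate widths are left unset, they are derived from hidden_size.
#
#   config = RwkvConfig(hidden_size=512, num_hidden_layers=6)
#   assert config.attention_hidden_size == 512
#   assert config.intermediate_size == 4 * 512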
UNIT_SYMBOL = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 12,
'Pm': 15,
'Em': 18,
'Zm': 21,
'Ym': 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert a length between the metric units listed in METRIC_CONVERSION."""
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
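    # Illustrative conversions (not part of the original file):
    print(length_conversion(4, "meter", "kilometer"))  # 0.004
    print(length_conversion(1, "gigametre", "meter"))  # 1000000000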
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}


class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
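
# Minimal usage sketch (illustrative, not part of the original module): the ONNX
# config advertises a single dynamically-shaped `pixel_values` input.
#
#   onnx_config = PoolFormerOnnxConfig(PoolFormerConfig())
#   print(onnx_config.inputs)               # OrderedDict([('pixel_values', {...})])
#   print(onnx_config.atol_for_validation)  # 0.002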
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    # special case for the DoubleHeads model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
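
# Run note (illustrative, not part of the original file): the fast tests above run
# under plain pytest, while the @slow tests download the `openai-gpt` checkpoint and
# are only collected when RUN_SLOW=1 is set in the environment.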
from __future__ import annotations


class Node:
    """A Node has a data field and pointers to the nodes to its left and right."""

    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    """A full binary tree is one in which every node has either zero or two children."""
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)


if __name__ == "__main__":
    main()
def solution() -> int:
    """
    Return the product of the digits d_1 * d_10 * d_100 * d_1000 * d_10000 *
    d_100000 * d_1000000 of the Champernowne constant 0.123456789101112...
    """
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(solution())
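    # This is Project Euler problem 40; the expected product of the seven digits is 210.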
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )

        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)

        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)

        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")

        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False


try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )

default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs


def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)

        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level-1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
        self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError

            config_dict = Config.load_yaml(resolved_config_file)

        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False])/len(n1.flatten())*100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")


# Hugging face functions below


def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str:
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"


def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )

            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path


def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()

    if url.endswith(".h5"):
        filename += ".h5"

    return filename
def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))

    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path

        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))

        return output_path_extracted

    return output_path


def get_data(query, delim=","):
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()  # fixed: the original called .json() on the requests module
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
            try:
                data = eval(data)
            except Exception:
                data = data.split("\n")
        req.close()
    return data
def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img


def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new


def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")


def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img


def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
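
# Minimal usage sketch (illustrative, not part of the original module; the model id
# mirrors the one used elsewhere in this research project):
#
#   config = Config.from_pretrained("unc-nlp/frcnn-vg-finetuned")
#   img = img_tensorize("input.jpg", input_format="RGB")
#   for batch in chunk([img], batch=1):
#       ...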
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                "tokens": datasets.Sequence(datasets.Value("string")),\n                "ner_tags": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            "O",\n                            "B-PER",\n                            "I-PER",\n                            "B-ORG",\n                            "I-ORG",\n                            "B-LOC",\n                            "I-LOC",\n                        ]\n                    )\n                ),\n                "langs": datasets.Sequence(datasets.Value("string")),\n                "spans": datasets.Sequence(datasets.Value("string")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, "r", encoding="utf-8") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n'


@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
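
# Minimal usage sketch (illustrative, not part of the original file): a test can
# request the fixture to get a ready-to-load dataset script on disk.
#
# def test_load_dummy_dataset(dataset_loading_script_dir):
#     from datasets import load_dataset
#     dataset = load_dataset(dataset_loading_script_dir, split="train")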
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )

    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Tuple ):
# Initialize accelerator
__magic_name__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__magic_name__ = config['''lr''']
__magic_name__ = int(config['''num_epochs'''] )
__magic_name__ = int(config['''seed'''] )
__magic_name__ = int(config['''batch_size'''] )
__magic_name__ = args.model_name_or_path
set_seed(snake_case_ )
__magic_name__ , __magic_name__ = get_dataloaders(snake_case_ , snake_case_ , snake_case_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__magic_name__ = AutoModelForSequenceClassification.from_pretrained(snake_case_ , return_dict=snake_case_ )
# Instantiate optimizer
__magic_name__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__magic_name__ = optimizer_cls(params=model.parameters() , lr=snake_case_ )
if accelerator.state.deepspeed_plugin is not None:
__magic_name__ = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
__magic_name__ = 1
__magic_name__ = (len(snake_case_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__magic_name__ = get_linear_schedule_with_warmup(
optimizer=snake_case_ , num_warmup_steps=0 , num_training_steps=snake_case_ , )
else:
__magic_name__ = DummyScheduler(snake_case_ , total_num_steps=snake_case_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# We need to keep track of how many total steps we have iterated over
__magic_name__ = 0
    # We also need to keep track of the starting epoch so files are named properly
__magic_name__ = 0
__magic_name__ = evaluate.load('''glue''' , '''mrpc''' )
__magic_name__ = num_epochs
if args.partial_train_epoch is not None:
__magic_name__ = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
__magic_name__ = args.resume_from_checkpoint.split('''epoch_''' )[1]
__magic_name__ = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
__magic_name__ = int(snake_case_ ) + 1
__magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
accelerator.print('''resumed checkpoint performance:''' , snake_case_ )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
        accelerator.print('''resumed optimizer\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , '''r''' ) as f:
__magic_name__ = json.load(snake_case_ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
__magic_name__ = {}
for epoch in range(snake_case_ , snake_case_ ):
model.train()
for step, batch in enumerate(snake_case_ ):
__magic_name__ = model(**snake_case_ )
__magic_name__ = outputs.loss
__magic_name__ = loss / gradient_accumulation_steps
accelerator.backward(snake_case_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
__magic_name__ = f'epoch_{epoch}'
__magic_name__ = os.path.join(args.output_dir , snake_case_ )
accelerator.save_state(snake_case_ )
__magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
__magic_name__ = accuracy
__magic_name__ = lr_scheduler.get_lr()[0]
__magic_name__ = optimizer.param_groups[0]['''lr''']
__magic_name__ = epoch
__magic_name__ = overall_step
accelerator.print(f'epoch {epoch}:' , snake_case_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , '''w''' ) as f:
json.dump(snake_case_ , snake_case_ )
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=snake_case_ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=snake_case_ , )
parser.add_argument(
'''--output_dir''' , type=snake_case_ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=snake_case_ , default=snake_case_ , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--partial_train_epoch''' , type=snake_case_ , default=snake_case_ , help='''If passed, the training will stop after this number of epochs.''' , )
parser.add_argument(
'''--num_epochs''' , type=snake_case_ , default=2 , help='''Number of train epochs.''' , )
__magic_name__ = parser.parse_args()
__magic_name__ = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(snake_case_ , snake_case_ )
if __name__ == "__main__":
    main()
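# Illustrative sketch (not part of the original script): how the epoch index is
# recovered from a checkpoint folder name such as "epoch_12", mirroring the
# digit-scan loop above. The path below is a made-up example.
def parse_epoch_from_checkpoint(path: str) -> int:
    epoch_string = path.split("epoch_")[1]
    digits = ""
    for char in epoch_string:
        if char.isdigit():
            digits += char
        else:
            break
    return int(digits)

assert parse_epoch_from_checkpoint("outputs/epoch_12") == 12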
import os
from datetime import datetime as dt
from github import Github
a_ : Optional[Any] = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = Github(os.environ['''GITHUB_TOKEN'''] )
__magic_name__ = g.get_repo('''huggingface/diffusers''' )
__magic_name__ = repo.get_issues(state='''open''' )
for issue in open_issues:
__magic_name__ = sorted(issue.get_comments() , key=lambda snake_case_ : i.created_at , reverse=snake_case_ )
__magic_name__ = comments[0] if len(snake_case_ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
    main()
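# Illustrative sketch (not part of the original script): the staleness test
# above reduces to day arithmetic on the issue's `updated_at`/`created_at`
# timestamps. The dates below are arbitrary examples.
from datetime import datetime as dt

updated_at = dt(2023, 1, 1)
created_at = dt(2022, 11, 1)
now = dt(2023, 2, 1)
assert (now - updated_at).days > 23 and (now - created_at).days >= 30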
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
return " ".join(
''''''.join(word[::-1] ) if len(snake_case_ ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(reverse_long_words('Hey wollef sroirraw'))
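# Illustrative sketch (not part of the original snippet), reusing the function
# defined above: words longer than four characters are reversed, shorter ones
# are kept as-is.
assert reverse_long_words("Hey wollef sroirraw") == "Hey fellow warriors"
assert reverse_long_words("to be") == "to be"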
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : str=7 ):
__magic_name__ = None
if token is not None:
__magic_name__ = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f'Bearer {token}'}
# The id of a workflow (not of a workflow run)
__magic_name__ = '''636036'''
__magic_name__ = f'https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += f'?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'
__magic_name__ = requests.get(snake_case_ , headers=snake_case_ ).json()
return result["workflow_runs"]
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
__magic_name__ = get_daily_ci_runs(snake_case_ )
__magic_name__ = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
__magic_name__ = workflow_run['''id''']
break
return workflow_run_id
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Optional[int] , snake_case_ : List[str] ):
__magic_name__ = get_last_daily_ci_runs(snake_case_ )
if workflow_run_id is not None:
__magic_name__ = get_artifacts_links(worflow_run_id=snake_case_ , token=snake_case_ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
__magic_name__ = artifacts_links[artifact_name]
download_artifact(
artifact_name=snake_case_ , artifact_url=snake_case_ , output_dir=snake_case_ , token=snake_case_ )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Tuple , snake_case_ : Any ):
get_last_daily_ci_artifacts(snake_case_ , snake_case_ , snake_case_ )
__magic_name__ = {}
for artifact_name in artifact_names:
__magic_name__ = os.path.join(snake_case_ , f'{artifact_name}.zip' )
if os.path.isfile(snake_case_ ):
__magic_name__ = {}
with zipfile.ZipFile(snake_case_ ) as z:
for filename in z.namelist():
if not os.path.isdir(snake_case_ ):
# read the file
with z.open(snake_case_ ) as f:
__magic_name__ = f.read().decode('''UTF-8''' )
    return results
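# Illustrative sketch (not part of the original script): reading text members
# out of a zip archive, as the loop above does, demonstrated on an in-memory
# archive so it runs without a downloaded artifact.
import io
import zipfile

buffer = io.BytesIO()
with zipfile.ZipFile(buffer, "w") as z:
    z.writestr("failures.txt", "test_foo failed")
with zipfile.ZipFile(buffer) as z:
    for filename in z.namelist():
        with z.open(filename) as f:
            print(filename, "->", f.read().decode("UTF-8"))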
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
a_ : Any = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
a_ : int = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
a_ : List[str] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
"""simple docstring"""
def __A ( self ) -> List[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
def __A ( self , A , A , A=None , A=None , A=None , A=None , A="auto" , A=-1 , A=0.9 , A=5 , A=5_00 , A="gpt2-large" , A=-1 , A=10_24 , A=25 , A=5 , A=True , A=25 , ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = compute_mauve(
p_text=A , q_text=A , p_features=A , q_features=A , p_tokens=A , q_tokens=A , num_buckets=A , pca_max_data=A , kmeans_explained_var=A , kmeans_num_redo=A , kmeans_max_iter=A , featurize_model_name=A , device_id=A , max_text_length=A , divergence_curve_discretization_size=A , mauve_scaling_factor=A , verbose=A , seed=A , )
        return out
import argparse
a_ : Optional[Any] = 'docs/source/_static/js/custom.js'
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
with open(snake_case_ , encoding='''utf-8''' , newline='''\n''' ) as f:
__magic_name__ = f.readlines()
__magic_name__ = 0
# First let's put the right version
while not lines[index].startswith('''const stableVersion =''' ):
index += 1
__magic_name__ = f'const stableVersion = "v{version}"\n'
# Then update the dictionary
while not lines[index].startswith('''const versionMapping = {''' ):
index += 1
# We go until the end
while not lines[index].startswith('''}''' ):
index += 1
# We add the new version at the end
lines[index - 1] += f' "v{version}": "v{version}",\n'
with open(snake_case_ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(snake_case_ )
if __name__ == "__main__":
a_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--version', help='Release version.')
a_ : List[str] = parser.parse_args()
    update_custom_js(args.version)
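# Illustrative sketch (not part of the original script): the same line scan as
# above applied to an in-memory sample instead of the real custom.js file.
lines = [
    'const stableVersion = "v4.0.0"\n',
    "const versionMapping = {\n",
    '    "v4.0.0": "v4.0.0",\n',
    "}\n",
]
version = "4.1.0"
index = 0
while not lines[index].startswith("const stableVersion ="):
    index += 1
lines[index] = f'const stableVersion = "v{version}"\n'
while not lines[index].startswith("}"):
    index += 1
lines[index - 1] += f'    "v{version}": "v{version}",\n'
print("".join(lines))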
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
a_ : Tuple = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
a_ : List[str] = typing.Union[np.floataa, int, float] # noqa: UP007
def _SCREAMING_SNAKE_CASE ( snake_case_ : Vector , snake_case_ : Vector ):
return np.sqrt(np.sum((np.asarray(snake_case_ ) - np.asarray(snake_case_ )) ** 2 ) )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Vector , snake_case_ : Vector ):
    return sum((va - vb) ** 2 for va, vb in zip(snake_case_ , snake_case_ ) ) ** (1 / 2)
if __name__ == "__main__":
def _SCREAMING_SNAKE_CASE ( ):
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=1_0000 , globals=globals() , ) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=1_0000 , globals=globals() , ) )
    benchmark()
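# Illustrative sketch (not part of the original snippet): the numpy and the
# pure-Python distance computations above agree on the same inputs.
import numpy as np

a, b = [1.0, 2.0, 3.0], [4.0, 5.0, 6.0]
expected = np.sqrt(np.sum((np.asarray(a) - np.asarray(b)) ** 2))
manual = sum((x - y) ** 2 for x, y in zip(a, b)) ** 0.5
assert abs(expected - manual) < 1e-9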
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
a_ : str = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : Any , snake_case_ : Any ):
__magic_name__ = UniSpeechSatForSequenceClassification.from_pretrained(snake_case_ , config=snake_case_ )
__magic_name__ = downstream_dict['''projector.weight''']
__magic_name__ = downstream_dict['''projector.bias''']
__magic_name__ = downstream_dict['''model.post_net.linear.weight''']
__magic_name__ = downstream_dict['''model.post_net.linear.bias''']
return model
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str , snake_case_ : Any ):
__magic_name__ = UniSpeechSatForAudioFrameClassification.from_pretrained(snake_case_ , config=snake_case_ )
__magic_name__ = downstream_dict['''model.linear.weight''']
__magic_name__ = downstream_dict['''model.linear.bias''']
return model
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Optional[Any] ):
__magic_name__ = UniSpeechSatForXVector.from_pretrained(snake_case_ , config=snake_case_ )
__magic_name__ = downstream_dict['''connector.weight''']
__magic_name__ = downstream_dict['''connector.bias''']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
__magic_name__ = downstream_dict[
f'model.framelevel_feature_extractor.module.{i}.kernel.weight'
]
__magic_name__ = downstream_dict[f'model.framelevel_feature_extractor.module.{i}.kernel.bias']
__magic_name__ = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
__magic_name__ = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
__magic_name__ = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
__magic_name__ = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
__magic_name__ = downstream_dict['''objective.W''']
return model
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : int , snake_case_ : List[str] , snake_case_ : Tuple ):
__magic_name__ = torch.load(snake_case_ , map_location='''cpu''' )
__magic_name__ = checkpoint['''Downstream''']
__magic_name__ = UniSpeechSatConfig.from_pretrained(snake_case_ )
__magic_name__ = WavaVecaFeatureExtractor.from_pretrained(
snake_case_ , return_attention_mask=snake_case_ , do_normalize=snake_case_ )
__magic_name__ = hf_config.architectures[0]
if arch.endswith('''ForSequenceClassification''' ):
__magic_name__ = convert_classification(snake_case_ , snake_case_ , snake_case_ )
elif arch.endswith('''ForAudioFrameClassification''' ):
__magic_name__ = convert_diarization(snake_case_ , snake_case_ , snake_case_ )
elif arch.endswith('''ForXVector''' ):
__magic_name__ = convert_xvector(snake_case_ , snake_case_ , snake_case_ )
else:
raise NotImplementedError(f'S3PRL weights conversion is not supported for {arch}' )
if hf_config.use_weighted_layer_sum:
__magic_name__ = checkpoint['''Featurizer''']['''weights''']
hf_feature_extractor.save_pretrained(snake_case_ )
hf_model.save_pretrained(snake_case_ )
if __name__ == "__main__":
a_ : Dict = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
a_ : Optional[Any] = parser.parse_args()
    convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
a_ : str = logging.get_logger(__name__)
a_ : Union[str, Any] = 'https://openaipublic.azureedge.net/jukebox/models/'
a_ : List[Any] = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
if "conditioner_blocks.0." in key:
__magic_name__ = key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
if "prime_prior" in key:
__magic_name__ = key.replace('''prime_prior''' , '''encoder''' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
__magic_name__ = key.replace('''.emb.''' , '''.''' )
if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('''.k''' , '''.codebook''' )
if "y_emb." in key:
return key.replace('''y_emb.''' , '''metadata_embedding.''' )
if "x_emb.emb." in key:
__magic_name__ = key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] ):
__magic_name__ = {}
import re
__magic_name__ = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
__magic_name__ = re.compile(
r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
__magic_name__ = re.compile(
r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
__magic_name__ = re.compile(
r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_conv_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] )
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_encoder_block_conv_in.sub(snake_case_ , snake_case_ )
elif re_encoder_block_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] )
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_encoder_block_resnet.sub(snake_case_ , snake_case_ )
elif re_encoder_block_proj_out.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_proj_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
__magic_name__ = re_encoder_block_proj_out.sub(snake_case_ , snake_case_ )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_conv_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) - 2
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_decoder_block_conv_out.sub(snake_case_ , snake_case_ )
elif re_decoder_block_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) - 2
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_decoder_block_resnet.sub(snake_case_ , snake_case_ )
elif re_decoder_block_proj_in.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_proj_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
__magic_name__ = re_decoder_block_proj_in.sub(snake_case_ , snake_case_ )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_conv_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[1] ) * 2 + int(groups[2] ) - 2
__magic_name__ = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_prior_cond_conv_out.sub(snake_case_ , snake_case_ )
elif re_prior_cond_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[1] ) * 2 + int(groups[2] ) - 2
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'conditioner_blocks.upsampler.upsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_prior_cond_resnet.sub(snake_case_ , snake_case_ )
elif re_prior_cond_proj_in.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_proj_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
__magic_name__ = re_prior_cond_proj_in.sub(snake_case_ , snake_case_ )
# keep original key
else:
__magic_name__ = original_key
__magic_name__ = replace_key(snake_case_ )
if f'{key_prefix}.{key}' not in model_state_dict or key is None:
print(f'failed converting {original_key} to {key}, does not match' )
        # handle mismatched shapes
        elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape:
            __magic_name__ = model_state_dict[f'{key_prefix}.{key}']
            print(f'{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match' )
__magic_name__ = original_key
__magic_name__ = original_key
__magic_name__ = value
return new_dict
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict=None , snake_case_ : Any=None ):
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ):
__magic_name__ = requests.get(f'{PREFIX}{file}' , allow_redirects=snake_case_ )
os.makedirs(f'{pytorch_dump_folder_path}/' , exist_ok=snake_case_ )
open(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' , '''wb''' ).write(r.content )
__magic_name__ = MODEL_MAPPING[model_name.split('''/''' )[-1]]
__magic_name__ = JukeboxConfig.from_pretrained(snake_case_ )
__magic_name__ = JukeboxModel(snake_case_ )
__magic_name__ = []
__magic_name__ = {}
for i, dict_name in enumerate(snake_case_ ):
__magic_name__ = torch.load(f'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}' )['''model''']
__magic_name__ = {}
for k in old_dic.keys():
if k.endswith('''.b''' ):
__magic_name__ = old_dic[k]
elif k.endswith('''.w''' ):
__magic_name__ = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
__magic_name__ = old_dic[k]
else:
__magic_name__ = old_dic[k]
__magic_name__ = '''vqvae''' if i == 0 else f'priors.{3 - i}'
__magic_name__ = fix_jukebox_keys(snake_case_ , model.state_dict() , snake_case_ , snake_case_ )
weight_dict.append(snake_case_ )
__magic_name__ = weight_dict.pop(0 )
model.vqvae.load_state_dict(snake_case_ )
for i in range(len(snake_case_ ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
with open(f'{pytorch_dump_folder_path}/mapping.json' , '''w''' ) as txtfile:
json.dump(snake_case_ , snake_case_ )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case_ )
return weight_dict
if __name__ == "__main__":
a_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
a_ : int = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
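# Illustrative sketch (not part of the original script): the conversion above
# is regex-driven key rewriting; here is the encoder-conv pattern applied to a
# single made-up checkpoint key.
import re

re_block = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
key = "encoders.0.level_blocks.1.model.2.3.weight"
groups = re_block.fullmatch(key).groups()
block_index = int(groups[2]) * 2 + int(groups[3])
print(key, "->", f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}")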
import math
import sys
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
__magic_name__ = ''''''
try:
with open(snake_case_ , '''rb''' ) as binary_file:
__magic_name__ = binary_file.read()
for dat in data:
__magic_name__ = f'{dat:08b}'
result += curr_byte
return result
except OSError:
print('''File not accessible''' )
sys.exit()
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
__magic_name__ = {'''0''': '''0''', '''1''': '''1'''}
__magic_name__ , __magic_name__ = '''''', ''''''
__magic_name__ = len(snake_case_ )
for i in range(len(snake_case_ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
__magic_name__ = lexicon[curr_string]
result += last_match_id
__magic_name__ = last_match_id + '''0'''
if math.loga(snake_case_ ).is_integer():
__magic_name__ = {}
for curr_key in list(snake_case_ ):
__magic_name__ = lexicon.pop(snake_case_ )
__magic_name__ = new_lex
__magic_name__ = last_match_id + '''1'''
index += 1
__magic_name__ = ''''''
return result
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str ):
__magic_name__ = 8
try:
with open(snake_case_ , '''wb''' ) as opened_file:
__magic_name__ = [
to_write[i : i + byte_length]
for i in range(0 , len(snake_case_ ) , snake_case_ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('''10000000''' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(snake_case_ , 2 ).to_bytes(1 , byteorder='''big''' ) )
except OSError:
print('''File not accessible''' )
sys.exit()
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
__magic_name__ = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
__magic_name__ = data_bits[counter:]
__magic_name__ = data_bits[counter + 1 :]
return data_bits
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str ):
__magic_name__ = read_file_binary(snake_case_ )
__magic_name__ = remove_prefix(snake_case_ )
__magic_name__ = decompress_data(snake_case_ )
write_file_binary(snake_case_ , snake_case_ )
if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
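# Illustrative sketch (not part of the original script): `f"{dat:08b}"` renders
# each byte as its 8-bit string, which is how the compressed file is turned
# into a bit stream above.
data = bytes([0b00000001, 0b10000000])
bits = "".join(f"{dat:08b}" for dat in data)
assert bits == "0000000110000000"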
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a_ : int = logging.get_logger(__name__)
a_ : Optional[int] = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = """table-transformer"""
_a = ["""past_key_values"""]
_a = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , A=True , A=None , A=3 , A=1_00 , A=6 , A=20_48 , A=8 , A=6 , A=20_48 , A=8 , A=0.0 , A=0.0 , A=True , A="relu" , A=2_56 , A=0.1 , A=0.0 , A=0.0 , A=0.02 , A=1.0 , A=False , A="sine" , A="resnet50" , A=True , A=False , A=1 , A=5 , A=2 , A=1 , A=1 , A=5 , A=2 , A=0.1 , **A , ) -> Any:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__magic_name__ = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(A , A ):
__magic_name__ = backbone_config.get('''model_type''' )
__magic_name__ = CONFIG_MAPPING[backbone_model_type]
__magic_name__ = config_class.from_dict(A )
# set timm attributes to None
__magic_name__ , __magic_name__ , __magic_name__ = None, None, None
__magic_name__ = use_timm_backbone
__magic_name__ = backbone_config
__magic_name__ = num_channels
__magic_name__ = num_queries
__magic_name__ = d_model
__magic_name__ = encoder_ffn_dim
__magic_name__ = encoder_layers
__magic_name__ = encoder_attention_heads
__magic_name__ = decoder_ffn_dim
__magic_name__ = decoder_layers
__magic_name__ = decoder_attention_heads
__magic_name__ = dropout
__magic_name__ = attention_dropout
__magic_name__ = activation_dropout
__magic_name__ = activation_function
__magic_name__ = init_std
__magic_name__ = init_xavier_std
__magic_name__ = encoder_layerdrop
__magic_name__ = decoder_layerdrop
__magic_name__ = encoder_layers
__magic_name__ = auxiliary_loss
__magic_name__ = position_embedding_type
__magic_name__ = backbone
__magic_name__ = use_pretrained_backbone
__magic_name__ = dilation
# Hungarian matcher
__magic_name__ = class_cost
__magic_name__ = bbox_cost
__magic_name__ = giou_cost
# Loss coefficients
__magic_name__ = mask_loss_coefficient
__magic_name__ = dice_loss_coefficient
__magic_name__ = bbox_loss_coefficient
__magic_name__ = giou_loss_coefficient
__magic_name__ = eos_coefficient
super().__init__(is_encoder_decoder=A , **A )
@property
def __A ( self ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def __A ( self ) -> int:
'''simple docstring'''
return self.d_model
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = version.parse("""1.11""" )
@property
def __A ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def __A ( self ) -> float:
'''simple docstring'''
return 1E-5
@property
def __A ( self ) -> int:
'''simple docstring'''
        return 12
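# Illustrative sketch (not part of the original file; assumes `transformers`
# is installed): the attribute map above makes `hidden_size` an alias for
# `d_model` on the config, with no model download required.
from transformers import TableTransformerConfig

config = TableTransformerConfig()
assert config.hidden_size == config.d_model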
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : int = logging.get_logger(__name__)
a_ : str = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = """blip_text_model"""
def __init__( self , A=3_05_24 , A=7_68 , A=7_68 , A=30_72 , A=7_68 , A=12 , A=8 , A=5_12 , A="gelu" , A=1E-12 , A=0.0 , A=0.0 , A=0.02 , A=3_05_22 , A=2 , A=0 , A=1_02 , A=True , A=True , **A , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(
pad_token_id=A , bos_token_id=A , eos_token_id=A , sep_token_id=A , **A , )
__magic_name__ = vocab_size
__magic_name__ = hidden_size
__magic_name__ = encoder_hidden_size
__magic_name__ = intermediate_size
__magic_name__ = projection_dim
__magic_name__ = hidden_dropout_prob
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = max_position_embeddings
__magic_name__ = layer_norm_eps
__magic_name__ = hidden_act
__magic_name__ = initializer_range
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = is_decoder
__magic_name__ = use_cache
@classmethod
def __A ( cls , A , **A ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(A )
__magic_name__ , __magic_name__ = cls.get_config_dict(A , **A )
# get the text config dict if we are loading from BlipConfig
if config_dict.get('''model_type''' ) == "blip":
__magic_name__ = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A , **A )
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = """blip_vision_model"""
def __init__( self , A=7_68 , A=30_72 , A=5_12 , A=12 , A=12 , A=3_84 , A=16 , A="gelu" , A=1E-5 , A=0.0 , A=1E-10 , **A , ) -> int:
'''simple docstring'''
super().__init__(**A )
__magic_name__ = hidden_size
__magic_name__ = intermediate_size
__magic_name__ = projection_dim
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = patch_size
__magic_name__ = image_size
__magic_name__ = initializer_range
__magic_name__ = attention_dropout
__magic_name__ = layer_norm_eps
__magic_name__ = hidden_act
@classmethod
def __A ( cls , A , **A ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(A )
__magic_name__ , __magic_name__ = cls.get_config_dict(A , **A )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('''model_type''' ) == "blip":
__magic_name__ = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A , **A )
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = """blip"""
_a = True
def __init__( self , A=None , A=None , A=5_12 , A=2.65_92 , A=2_56 , **A , ) -> Tuple:
'''simple docstring'''
super().__init__(**A )
if text_config is None:
__magic_name__ = {}
logger.info('''`text_config` is `None`. Initializing the `BlipTextConfig` with default values.''' )
if vision_config is None:
__magic_name__ = {}
logger.info('''`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.''' )
__magic_name__ = BlipTextConfig(**A )
__magic_name__ = BlipVisionConfig(**A )
__magic_name__ = self.vision_config.hidden_size
__magic_name__ = projection_dim
__magic_name__ = logit_scale_init_value
__magic_name__ = 1.0
__magic_name__ = 0.02
__magic_name__ = image_text_hidden_size
@classmethod
def __A ( cls , A , A , **A ) -> str:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **A )
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = copy.deepcopy(self.__dict__ )
__magic_name__ = self.text_config.to_dict()
__magic_name__ = self.vision_config.to_dict()
__magic_name__ = self.__class__.model_type
        return output
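# Illustrative sketch (not part of the original file; assumes `transformers`
# is installed): composing the joint config from the two sub-configs, as the
# classmethod above does.
from transformers import BlipConfig, BlipTextConfig, BlipVisionConfig

config = BlipConfig.from_text_vision_configs(BlipTextConfig(), BlipVisionConfig())
assert config.projection_dim == 512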
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] ):
# Initialise PyTorch model
__magic_name__ = LxmertConfig.from_json_file(snake_case_ )
print(f'Building PyTorch model from configuration: {config}' )
__magic_name__ = LxmertForPreTraining(snake_case_ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(snake_case_ , snake_case_ , snake_case_ )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , snake_case_ )
if __name__ == "__main__":
a_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ : Optional[Any] = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
import numpy as np
from PIL import Image
def _SCREAMING_SNAKE_CASE ( snake_case_ : np.ndarray , snake_case_ : int , snake_case_ : int ):
__magic_name__ = np.array(snake_case_ )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
__magic_name__ = 0
__magic_name__ = 0
__magic_name__ = 0
__magic_name__ = 0
# compute the shape of the output matrix
__magic_name__ = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
__magic_name__ = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
__magic_name__ = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__magic_name__ = 0
__magic_name__ = 0
return updated_arr
def _SCREAMING_SNAKE_CASE ( snake_case_ : np.ndarray , snake_case_ : int , snake_case_ : int ):
__magic_name__ = np.array(snake_case_ )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
__magic_name__ = 0
__magic_name__ = 0
__magic_name__ = 0
__magic_name__ = 0
# compute the shape of the output matrix
__magic_name__ = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
__magic_name__ = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
__magic_name__ = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__magic_name__ = 0
__magic_name__ = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
a_ : str = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
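# Illustrative sketch (not part of the original snippet), using the pooling
# functions defined above on a small matrix so the expected windows are easy
# to verify by hand.
import numpy as np

mat = np.arange(16).reshape(4, 4)
print(maxpooling(mat, size=2, stride=2))  # [[ 5.  7.] [13. 15.]]
print(avgpooling(mat, size=2, stride=2))  # [[ 2.  4.] [10. 12.]]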
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : Union[str, Any] ):
__magic_name__ = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
__magic_name__ = {
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
__magic_name__ = f'{src_lang}-{tgt_lang}'
__magic_name__ = f'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'
model_card_dir.mkdir(parents=snake_case_ , exist_ok=snake_case_ )
__magic_name__ = os.path.join(snake_case_ , '''README.md''' )
print(f'Generating {path}' )
with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(snake_case_ )
# make sure we are under the root of the project
a_ : Tuple = Path(__file__).resolve().parent.parent.parent
a_ : Dict = repo_dir / 'model_cards'
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
a_ : List[str] = model_cards_dir / 'allenai' / model_name
    write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ : Optional[Any] = {
'configuration_luke': ['LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LukeConfig'],
'tokenization_luke': ['LukeTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[str] = [
'LUKE_PRETRAINED_MODEL_ARCHIVE_LIST',
'LukeForEntityClassification',
'LukeForEntityPairClassification',
'LukeForEntitySpanClassification',
'LukeForMultipleChoice',
'LukeForQuestionAnswering',
'LukeForSequenceClassification',
'LukeForTokenClassification',
'LukeForMaskedLM',
'LukeModel',
'LukePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    a_ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
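# Illustrative sketch (not part of the original file): the lazy-import pattern
# above can be approximated with a module-level __getattr__ (PEP 562); the
# structure below lazily resolves a stdlib symbol as a stand-in example.
import importlib

_lazy_structure = {"math": ["sqrt"]}

def __getattr__(name):
    for module_name, symbols in _lazy_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)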
def _SCREAMING_SNAKE_CASE ( snake_case_ : list[int] , snake_case_ : list[int] ):
__magic_name__ = len(snake_case_ )
print('''The following activities are selected:''' )
# The first activity is always selected
__magic_name__ = 0
print(snake_case_ , end=''',''' )
# Consider rest of the activities
for j in range(snake_case_ ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(snake_case_ , end=''',''' )
__magic_name__ = j
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ : Dict = [1, 3, 0, 5, 8, 5]
a_ : Union[str, Any] = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
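# Illustrative sketch (not part of the original snippet): the same greedy rule
# as a pure function returning indices instead of printing them; it assumes the
# activities are already sorted by finish time, as the sample data is.
def select_activities(start, finish):
    selected = [0]
    last = 0
    for j in range(1, len(finish)):
        if start[j] >= finish[last]:
            selected.append(j)
            last = j
    return selected

assert select_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]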
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : Any , snake_case_ : int=[] ):
__magic_name__ = size[0] - overlap_pixels * 2
__magic_name__ = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
__magic_name__ = np.ones((size_y, size_x) , dtype=np.uinta ) * 255
__magic_name__ = np.pad(snake_case_ , mode='''linear_ramp''' , pad_width=snake_case_ , end_values=0 )
if "l" in remove_borders:
__magic_name__ = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
__magic_name__ = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
__magic_name__ = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
__magic_name__ = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : Tuple , snake_case_ : List[Any] ):
return max(snake_case_ , min(snake_case_ , snake_case_ ) )
def _SCREAMING_SNAKE_CASE ( snake_case_ : [int] , snake_case_ : [int] , snake_case_ : [int] ):
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def _SCREAMING_SNAKE_CASE ( snake_case_ : [int] , snake_case_ : int , snake_case_ : [int] ):
__magic_name__ = list(snake_case_ )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
__magic_name__ = clamp_rect(snake_case_ , [0, 0] , [image_size[0], image_size[1]] )
return rect
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : Optional[int] ):
__magic_name__ = Image.new('''RGB''' , (tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
result.paste(snake_case_ , (original_slice, 0) )
return result
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : List[str] ):
__magic_name__ = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
__magic_name__ = tile.crop(snake_case_ )
return tile
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : List[Any] ):
__magic_name__ = n % d
return n - divisor
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , A , A , A , A , A , A , A = 3_50 , ) -> str:
'''simple docstring'''
super().__init__(
vae=A , text_encoder=A , tokenizer=A , unet=A , low_res_scheduler=A , scheduler=A , max_noise_level=A , )
def __A ( self , A , A , A , A , A , A , A , **A ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
__magic_name__ = add_overlap_rect(A , A , image.size )
__magic_name__ = image.crop(A )
__magic_name__ = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
__magic_name__ = translated_slice_x - (original_image_slice / 2)
__magic_name__ = max(0 , A )
__magic_name__ = squeeze_tile(A , A , A , A )
__magic_name__ = to_input.size
__magic_name__ = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
__magic_name__ = super(A , self ).__call__(image=A , **A ).images[0]
__magic_name__ = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
__magic_name__ = unsqueeze_tile(A , A )
__magic_name__ = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
__magic_name__ = []
if x == 0:
remove_borders.append('''l''' )
elif crop_rect[2] == image.size[0]:
remove_borders.append('''r''' )
if y == 0:
remove_borders.append('''t''' )
elif crop_rect[3] == image.size[1]:
remove_borders.append('''b''' )
__magic_name__ = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=A ) , mode='''L''' , )
final_image.paste(
A , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , A )
@torch.no_grad()
def __call__( self , A , A , A = 75 , A = 9.0 , A = 50 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = None , A = 1 , A = 1_28 , A = 32 , A = 32 , ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = Image.new('''RGB''' , (image.size[0] * 4, image.size[1] * 4) )
__magic_name__ = math.ceil(image.size[0] / tile_size )
__magic_name__ = math.ceil(image.size[1] / tile_size )
__magic_name__ = tcx * tcy
__magic_name__ = 0
for y in range(A ):
for x in range(A ):
self._process_tile(
A , A , A , A , A , A , A , prompt=A , num_inference_steps=A , guidance_scale=A , noise_level=A , negative_prompt=A , num_images_per_prompt=A , eta=A , generator=A , latents=A , )
current_count += 1
if callback is not None:
callback({'''progress''': current_count / total_tile_count, '''image''': final_image} )
return final_image
def _SCREAMING_SNAKE_CASE ( ):
# Run a demo
__magic_name__ = '''stabilityai/stable-diffusion-x4-upscaler'''
__magic_name__ = StableDiffusionTiledUpscalePipeline.from_pretrained(snake_case_ , revision='''fp16''' , torch_dtype=torch.floataa )
__magic_name__ = pipe.to('''cuda''' )
__magic_name__ = Image.open('''../../docs/source/imgs/diffusers_library.jpg''' )
def callback(snake_case_ : Union[str, Any] ):
print(f'progress: {obj["progress"]:.4f}' )
obj["image"].save('''diffusers_library_progress.jpg''' )
__magic_name__ = pipe(image=snake_case_ , prompt='''Black font, white background, vector''' , noise_level=40 , callback=snake_case_ )
final_image.save('''diffusers_library.jpg''' )
if __name__ == "__main__":
main() | 678 |
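# Rough cost sketch for the demo above (an assumption, not a measurement):
# with the default tile_size=128 the pipeline runs ceil(W / 128) * ceil(H / 128)
# upscaler passes, so a 512x512 input needs 16 tile passes and yields a
# 2048x2048 result, since the x4 upscaler quadruples each side.
import math

assert math.ceil(512 / 128) * math.ceil(512 / 128) == 16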
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
a_ : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ : List[str] = 256
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = ["""melgan"""]
def __init__( self , A , A , A , A , A , ) -> None:
'''simple docstring'''
super().__init__()
# From MELGAN
__magic_name__ = math.log(1E-5 ) # Matches MelGAN training.
__magic_name__ = 4.0 # Largest value for most examples
__magic_name__ = 1_28
self.register_modules(
notes_encoder=A , continuous_encoder=A , decoder=A , scheduler=A , melgan=A , )
def __A ( self , A , A=(-1.0, 1.0) , A=False ) -> List[Any]:
'''simple docstring'''
__magic_name__ , __magic_name__ = output_range
if clip:
__magic_name__ = torch.clip(A , self.min_value , self.max_value )
# Scale to [0, 1].
__magic_name__ = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def __A ( self , A , A=(-1.0, 1.0) , A=False ) -> Optional[int]:
'''simple docstring'''
__magic_name__ , __magic_name__ = input_range
__magic_name__ = torch.clip(A , A , A ) if clip else outputs
# Scale to [0, 1].
__magic_name__ = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def __A ( self , A , A , A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = input_tokens > 0
__magic_name__ , __magic_name__ = self.notes_encoder(
encoder_input_tokens=A , encoder_inputs_mask=A )
__magic_name__ , __magic_name__ = self.continuous_encoder(
encoder_inputs=A , encoder_inputs_mask=A )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def __A ( self , A , A , A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = noise_time
if not torch.is_tensor(A ):
__magic_name__ = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(A ) and len(timesteps.shape ) == 0:
__magic_name__ = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__magic_name__ = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
__magic_name__ = self.decoder(
encodings_and_masks=A , decoder_input_tokens=A , decoder_noise_time=A )
return logits
@torch.no_grad()
def __call__( self , A , A = None , A = 1_00 , A = True , A = "numpy" , A = None , A = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
'''simple docstring'''
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(A )}.' )
__magic_name__ = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
__magic_name__ = np.zeros([1, 0, self.n_dims] , np.floataa )
__magic_name__ = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=A , device=self.device )
for i, encoder_input_tokens in enumerate(A ):
if i == 0:
__magic_name__ = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
__magic_name__ = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=A , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
__magic_name__ = ones
__magic_name__ = self.scale_features(
A , output_range=[-1.0, 1.0] , clip=A )
__magic_name__ = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=A , continuous_mask=A , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
__magic_name__ = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=A , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(A )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__magic_name__ = self.decode(
encodings_and_masks=A , input_tokens=A , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
__magic_name__ = self.scheduler.step(A , A , A , generator=A ).prev_sample
__magic_name__ = self.scale_to_features(A , input_range=[-1.0, 1.0] )
__magic_name__ = mel[:1]
__magic_name__ = mel.cpu().float().numpy()
__magic_name__ = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A , A )
logger.info('''Generated segment''' , A )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
if output_type == "numpy":
__magic_name__ = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
__magic_name__ = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=A ) | 678 | 1 |
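# Sanity sketch of the feature scaling used by the pipeline above: mapping
# [min_value, max_value] to [-1, 1] and back should round-trip. Plain floats
# are used here for illustration; the real methods operate on torch tensors.
_min_v, _max_v = -11.512925464970229, 4.0  # math.log(1e-5) and the clip ceiling
_x = 1.0
_zero_one = (_x - _min_v) / (_max_v - _min_v)
_scaled = _zero_one * 2.0 - 1.0  # scale_features(..., output_range=[-1.0, 1.0])
_back = ((_scaled + 1.0) / 2.0) * (_max_v - _min_v) + _min_v  # scale_to_features
assert abs(_back - _x) < 1e-9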
from random import shuffle
import tensorflow as tf
from numpy import array
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : str ):
__magic_name__ = int(snake_case_ )
assert noofclusters < len(snake_case_ )
# Find out the dimensionality
__magic_name__ = len(vectors[0] )
# Will help select random centroids from among the available vectors
__magic_name__ = list(range(len(snake_case_ ) ) )
shuffle(snake_case_ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
__magic_name__ = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
__magic_name__ = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
__magic_name__ = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(snake_case_ )
]
##These nodes will assign the centroid Variables the appropriate
##values
__magic_name__ = tf.placeholder('''float64''' , [dim] )
__magic_name__ = []
for centroid in centroids:
cent_assigns.append(tf.assign(snake_case_ , snake_case_ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
__magic_name__ = [tf.Variable(0 ) for i in range(len(snake_case_ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
__magic_name__ = tf.placeholder('''int32''' )
__magic_name__ = []
for assignment in assignments:
cluster_assigns.append(tf.assign(snake_case_ , snake_case_ ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
__magic_name__ = tf.placeholder('''float''' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
__magic_name__ = tf.reduce_mean(snake_case_ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
__magic_name__ = tf.placeholder('''float''' , [dim] )
__magic_name__ = tf.placeholder('''float''' , [dim] )
__magic_name__ = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(snake_case_ , snake_case_ ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
__magic_name__ = tf.placeholder('''float''' , [noofclusters] )
__magic_name__ = tf.argmin(snake_case_ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
__magic_name__ = tf.initialize_all_variables()
# Initialize all variables
sess.run(snake_case_ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
__magic_name__ = 100
for _ in range(snake_case_ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(snake_case_ ) ):
__magic_name__ = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
                # 'centroid_distances', since that is the input to the
# cluster assignment node.
__magic_name__ = [
sess.run(snake_case_ , feed_dict={va: vect, va: sess.run(snake_case_ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
__magic_name__ = sess.run(
snake_case_ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(snake_case_ ):
# Collect all the vectors assigned to this cluster
__magic_name__ = [
vectors[i]
for i in range(len(snake_case_ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
__magic_name__ = sess.run(
snake_case_ , feed_dict={mean_input: array(snake_case_ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
__magic_name__ = sess.run(snake_case_ )
__magic_name__ = sess.run(snake_case_ )
return centroids, assignments | 678 |
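# The graph code above uses legacy TensorFlow APIs (tf.sub predates TF 1.0,
# and tf.Session / tf.placeholder / tf.initialize_all_variables are TF1-era
# names gone from TF2's default namespace). As a framework-free sketch of the
# same E-step: each vector is assigned to the centroid with the smallest
# squared distance. `_nearest_centroid` is a name introduced here only for
# illustration.
import numpy as np

def _nearest_centroid(vectors: np.ndarray, centroids: np.ndarray) -> np.ndarray:
    # (n, 1, d) - (1, k, d) broadcasts to (n, k, d); summing gives (n, k) distances.
    d2 = ((vectors[:, None, :] - centroids[None, :, :]) ** 2).sum(axis=-1)
    return d2.argmin(axis=1)

assert _nearest_centroid(np.array([[0.0], [10.0]]), np.array([[1.0], [9.0]])).tolist() == [0, 1]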
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel | 678 | 1 |
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : int ):
__magic_name__ = word.split()
def justify(snake_case_ : list , snake_case_ : int , snake_case_ : int ) -> str:
__magic_name__ = max_width - width
__magic_name__ = len(snake_case_ )
if len(snake_case_ ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
__magic_name__ = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
__magic_name__ = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
__magic_name__ = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(snake_case_ ):
num_spaces_between_words_list[i] += 1
__magic_name__ = []
for i in range(snake_case_ ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * ''' ''' )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(snake_case_ )
__magic_name__ = []
__magic_name__ = []
__magic_name__ = 0
for word in words:
if width + len(snake_case_ ) + len(snake_case_ ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(snake_case_ )
width += len(snake_case_ )
else:
# justify the line and add it to result
answer.append(justify(snake_case_ , snake_case_ , snake_case_ ) )
# reset new line and new width
__magic_name__ , __magic_name__ = [word], len(snake_case_ )
__magic_name__ = max_width - width - len(snake_case_ )
answer.append(''' '''.join(snake_case_ ) + (remaining_spaces + 1) * ''' ''' )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod() | 678 |
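# Worked example for the justifier above (a hedged illustration; the
# function's original name is obfuscated in this file). With max_width=16,
# spaces are distributed round-robin from the left and the last line is
# left-justified with trailing padding:
#   input: "This is an example of text justification."
#   -> ['This    is    an',
#       'example  of text',
#       'justification.  ']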
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
__magic_name__ = SwinConfig(image_size=192 )
if "base" in model_name:
__magic_name__ = 6
__magic_name__ = 128
__magic_name__ = (2, 2, 18, 2)
__magic_name__ = (4, 8, 16, 32)
elif "large" in model_name:
__magic_name__ = 12
__magic_name__ = 192
__magic_name__ = (2, 2, 18, 2)
__magic_name__ = (6, 12, 24, 48)
else:
raise ValueError('''Model not supported, only supports base and large variants''' )
__magic_name__ = window_size
__magic_name__ = embed_dim
__magic_name__ = depths
__magic_name__ = num_heads
return config
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
if "encoder.mask_token" in name:
__magic_name__ = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
if "encoder.patch_embed.proj" in name:
__magic_name__ = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "encoder.patch_embed.norm" in name:
__magic_name__ = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
if "attn.proj" in name:
__magic_name__ = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
__magic_name__ = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
__magic_name__ = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
__magic_name__ = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
__magic_name__ = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
__magic_name__ = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
__magic_name__ = '''layernorm.weight'''
if name == "encoder.norm.bias":
__magic_name__ = '''layernorm.bias'''
if "decoder" in name:
pass
else:
__magic_name__ = '''swin.''' + name
return name
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Any ):
for key in orig_state_dict.copy().keys():
__magic_name__ = orig_state_dict.pop(snake_case_ )
if "attn_mask" in key:
pass
elif "qkv" in key:
__magic_name__ = key.split('''.''' )
__magic_name__ = int(key_split[2] )
__magic_name__ = int(key_split[4] )
__magic_name__ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__magic_name__ = val[:dim, :]
__magic_name__ = val[
dim : dim * 2, :
]
__magic_name__ = val[-dim:, :]
else:
__magic_name__ = val[
:dim
]
__magic_name__ = val[
dim : dim * 2
]
__magic_name__ = val[
-dim:
]
else:
__magic_name__ = val
return orig_state_dict
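# Minimal sketch of the fused-QKV split performed above: SimMIM checkpoints
# store query/key/value as a single (3 * dim, dim) matrix, and the HF model
# wants three (dim, dim) slices in q, k, v order. Tensor names here are
# illustrative only.
import torch

_dim = 4
_qkv = torch.arange(3 * _dim * _dim, dtype=torch.float32).reshape(3 * _dim, _dim)
_q, _k, _v = _qkv[:_dim, :], _qkv[_dim : 2 * _dim, :], _qkv[-_dim:, :]
assert torch.equal(torch.cat([_q, _k, _v], dim=0), _qkv)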
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : int , snake_case_ : Any , snake_case_ : str ):
__magic_name__ = torch.load(snake_case_ , map_location='''cpu''' )['''model''']
__magic_name__ = get_swin_config(snake_case_ )
__magic_name__ = SwinForMaskedImageModeling(snake_case_ )
model.eval()
__magic_name__ = convert_state_dict(snake_case_ , snake_case_ )
model.load_state_dict(snake_case_ )
__magic_name__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__magic_name__ = ViTImageProcessor(size={'''height''': 192, '''width''': 192} )
__magic_name__ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw )
__magic_name__ = image_processor(images=snake_case_ , return_tensors='''pt''' )
with torch.no_grad():
__magic_name__ = model(**snake_case_ ).logits
print(outputs.keys() )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case_ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(snake_case_ )
if push_to_hub:
print(f'Pushing model and image processor for {model_name} to hub' )
model.push_to_hub(f'microsoft/{model_name}' )
image_processor.push_to_hub(f'microsoft/{model_name}' )
if __name__ == "__main__":
a_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
a_ : Optional[Any] = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub) | 678 | 1 |
import numpy as np
def _SCREAMING_SNAKE_CASE ( snake_case_ : np.ndarray , snake_case_ : np.ndarray , snake_case_ : float = 1E-12 , snake_case_ : int = 100 , ):
assert np.shape(snake_case_ )[0] == np.shape(snake_case_ )[1]
# Ensure proper dimensionality.
assert np.shape(snake_case_ )[0] == np.shape(snake_case_ )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(snake_case_ ) == np.iscomplexobj(snake_case_ )
__magic_name__ = np.iscomplexobj(snake_case_ )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(snake_case_ , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
__magic_name__ = False
__magic_name__ = 0
__magic_name__ = 0
__magic_name__ = 1E12
while not convergence:
# Multiple matrix by the vector.
__magic_name__ = np.dot(snake_case_ , snake_case_ )
# Normalize the resulting output vector.
__magic_name__ = w / np.linalg.norm(snake_case_ )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
__magic_name__ = vector.conj().T if is_complex else vector.T
__magic_name__ = np.dot(snake_case_ , np.dot(snake_case_ , snake_case_ ) )
# Check convergence.
__magic_name__ = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
__magic_name__ = True
__magic_name__ = lambda_
if is_complex:
__magic_name__ = np.real(lambda_ )
return lambda_, vector
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
__magic_name__ = np.array([41, 4, 20] )
__magic_name__ = real_input_matrix.astype(np.complexaaa )
__magic_name__ = np.triu(1J * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
__magic_name__ = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
__magic_name__ = real_input_matrix
__magic_name__ = real_vector
elif problem_type == "complex":
__magic_name__ = complex_input_matrix
__magic_name__ = complex_vector
# Our implementation.
__magic_name__ , __magic_name__ = power_iteration(snake_case_ , snake_case_ )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
__magic_name__ , __magic_name__ = np.linalg.eigh(snake_case_ )
# Last eigenvalue is the maximum one.
__magic_name__ = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
__magic_name__ = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(snake_case_ ) - np.abs(snake_case_ ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration() | 678 |
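# A hedged spot check for the routine above (calling it by its original name,
# power_iteration, exactly as the test helper does): for diag(2, 1) the
# dominant eigenvalue is 2 with eigenvector e1, up to sign.
_val, _vec = power_iteration(np.array([[2.0, 0.0], [0.0, 1.0]]), np.array([1.0, 1.0]))
assert abs(_val - 2.0) < 1e-6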
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
return "".join(sorted(snake_case_ ) )
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
return word_by_signature[signature(snake_case_ )]
a_ : str = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
a_ : Optional[Any] = sorted({word.strip().lower() for word in data.splitlines()})
a_ : List[Any] = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
a_ : Optional[Any] = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams)) | 678 | 1 |
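# Quick illustration of the signature trick above (hedged: the sorting
# helper's original name, `signature`, leaks through in the call sites):
# two words are anagrams exactly when their sorted letters match.
assert signature('listen') == signature('silent') == 'eilnst'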
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Tuple = logging.get_logger(__name__)
a_ : str = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = """mvp"""
_a = ["""past_key_values"""]
_a = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , A=5_02_67 , A=10_24 , A=12 , A=40_96 , A=16 , A=12 , A=40_96 , A=16 , A=0.0 , A=0.0 , A="gelu" , A=10_24 , A=0.1 , A=0.0 , A=0.0 , A=0.02 , A=0.0 , A=False , A=True , A=1 , A=0 , A=2 , A=True , A=2 , A=2 , A=False , A=1_00 , A=8_00 , **A , ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = vocab_size
__magic_name__ = max_position_embeddings
__magic_name__ = d_model
__magic_name__ = encoder_ffn_dim
__magic_name__ = encoder_layers
__magic_name__ = encoder_attention_heads
__magic_name__ = decoder_ffn_dim
__magic_name__ = decoder_layers
__magic_name__ = decoder_attention_heads
__magic_name__ = dropout
__magic_name__ = attention_dropout
__magic_name__ = activation_dropout
__magic_name__ = activation_function
__magic_name__ = init_std
__magic_name__ = encoder_layerdrop
__magic_name__ = decoder_layerdrop
__magic_name__ = classifier_dropout
__magic_name__ = use_cache
__magic_name__ = encoder_layers
__magic_name__ = scale_embedding # scale factor will be sqrt(d_model) if True
__magic_name__ = use_prompt
__magic_name__ = prompt_length
__magic_name__ = prompt_mid_dim
super().__init__(
pad_token_id=A , bos_token_id=A , eos_token_id=A , is_encoder_decoder=A , decoder_start_token_id=A , forced_eos_token_id=A , **A , )
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , A ):
__magic_name__ = self.bos_token_id
warnings.warn(
F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
'''The config can simply be saved and uploaded again to be fixed.''' ) | 678 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , A ) -> Tuple:
'''simple docstring'''
__magic_name__ = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
__magic_name__ = len(A ) - 1
def __A ( self , A ) -> list[float]:
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__magic_name__ = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , A ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(A ) , 5 ) == 1
return output_values
def __A ( self , A ) -> tuple[float, float]:
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__magic_name__ = self.basis_function(A )
__magic_name__ = 0.0
__magic_name__ = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def __A ( self , A = 0.01 ) -> Tuple:
'''simple docstring'''
from matplotlib import pyplot as plt # type: ignore
__magic_name__ = [] # x coordinates of points to plot
__magic_name__ = [] # y coordinates of points to plot
__magic_name__ = 0.0
while t <= 1:
__magic_name__ = self.bezier_curve_function(A )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
__magic_name__ = [i[0] for i in self.list_of_points]
__magic_name__ = [i[1] for i in self.list_of_points]
plt.plot(
A , A , color='''blue''' , label='''Curve of Degree ''' + str(self.degree ) , )
plt.scatter(A , A , color='''red''' , label='''Control Points''' )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3 | 678 | 1 |
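# Hedged numeric check for the class above, using the method names that the
# class body itself references: a degree-1 Bezier curve is a straight line,
# so t=0.5 evaluates to the midpoint of the two control points.
_line = BezierCurve([(1, 1), (3, 3)])
assert _line.basis_function(0.5) == [0.5, 0.5]
assert _line.bezier_curve_function(0.5) == (2.0, 2.0)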
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ : Dict = {
'configuration_bigbird_pegasus': [
'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BigBirdPegasusConfig',
'BigBirdPegasusOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[int] = [
'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
'BigBirdPegasusForCausalLM',
'BigBirdPegasusForConditionalGeneration',
'BigBirdPegasusForQuestionAnswering',
'BigBirdPegasusForSequenceClassification',
'BigBirdPegasusModel',
'BigBirdPegasusPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
a_ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 678 |
import re
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
__magic_name__ = re.compile(
r'''^(?:0|94|\+94|0{2}94)''' r'''7(0|1|2|4|5|6|7|8)''' r'''(-| |)''' r'''\d{7}$''' )
return bool(re.search(snake_case_ , snake_case_ ) )
if __name__ == "__main__":
a_ : Optional[int] = '0094702343221'
print(is_sri_lankan_phone_number(phone)) | 678 | 1 |
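# A couple of hedged spot checks for the validator above: the pattern accepts
# 0 / 94 / +94 / 0094 prefixes followed by 7, then a digit in
# {0,1,2,4,5,6,7,8}, an optional separator, and seven more digits.
assert is_sri_lankan_phone_number('+94773283048')
assert not is_sri_lankan_phone_number('0793283048')  # 79x is not an accepted prefix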
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(snake_case_ ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__('doctest').testmod() | 678 |
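# Expected behaviour of the helper above (a hedged illustration; the function
# name is obfuscated here): each alphabetic position is capitalised in turn
# and non-letter positions are skipped.
#   'ab1c' -> ['Ab1c', 'aB1c', 'ab1C']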
import os
import sys
import unittest
a_ : int = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
a_ : Optional[Any] = os.path.join(git_repo_path, 'src', 'diffusers')
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = find_backend(''' if not is_torch_available():''' )
self.assertEqual(A , '''torch''' )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
__magic_name__ = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' )
self.assertEqual(A , '''torch_and_transformers''' )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
__magic_name__ = find_backend(
''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' )
self.assertEqual(A , '''torch_and_transformers_and_onnx''' )
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , A )
self.assertIn('''torch_and_transformers''' , A )
self.assertIn('''flax_and_transformers''' , A )
self.assertIn('''torch_and_transformers_and_onnx''' , A )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''UNet2DModel''' , objects['''torch'''] )
self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] )
self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] )
self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] )
self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] )
self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(A , '''\nCONSTANT = None\n''' )
__magic_name__ = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
A , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
__magic_name__ = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
'''
__magic_name__ = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(A , A )
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
'''
__magic_name__ = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , A ) | 678 | 1 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
a_ : Dict = logging.get_logger(__name__)
a_ : Optional[Any] = 'Hello world! cécé herlolip'
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str , snake_case_ : bool ):
__magic_name__ = FairseqRobertaModel.from_pretrained(snake_case_ )
roberta.eval() # disable dropout
__magic_name__ = roberta.model.encoder.sentence_encoder
__magic_name__ = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
if classification_head:
__magic_name__ = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our RoBERTa config:''' , snake_case_ )
__magic_name__ = XLMRobertaXLForSequenceClassification(snake_case_ ) if classification_head else XLMRobertaXLForMaskedLM(snake_case_ )
model.eval()
# Now let's copy all the weights.
# Embeddings
__magic_name__ = roberta_sent_encoder.embed_tokens.weight
__magic_name__ = roberta_sent_encoder.embed_positions.weight
__magic_name__ = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
__magic_name__ = roberta_sent_encoder.layer_norm.weight
__magic_name__ = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__magic_name__ = model.roberta.encoder.layer[i]
__magic_name__ = roberta_sent_encoder.layers[i]
__magic_name__ = layer.attention
__magic_name__ = roberta_layer.self_attn_layer_norm.weight
__magic_name__ = roberta_layer.self_attn_layer_norm.bias
# self attention
__magic_name__ = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
__magic_name__ = roberta_layer.self_attn.q_proj.weight
__magic_name__ = roberta_layer.self_attn.q_proj.bias
__magic_name__ = roberta_layer.self_attn.k_proj.weight
__magic_name__ = roberta_layer.self_attn.k_proj.bias
__magic_name__ = roberta_layer.self_attn.v_proj.weight
__magic_name__ = roberta_layer.self_attn.v_proj.bias
# self-attention output
__magic_name__ = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
__magic_name__ = roberta_layer.self_attn.out_proj.weight
__magic_name__ = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
__magic_name__ = roberta_layer.final_layer_norm.weight
__magic_name__ = roberta_layer.final_layer_norm.bias
# intermediate
__magic_name__ = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
__magic_name__ = roberta_layer.fca.weight
__magic_name__ = roberta_layer.fca.bias
# output
__magic_name__ = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
__magic_name__ = roberta_layer.fca.weight
__magic_name__ = roberta_layer.fca.bias
# end of layer
if classification_head:
__magic_name__ = roberta.model.classification_heads['''mnli'''].dense.weight
__magic_name__ = roberta.model.classification_heads['''mnli'''].dense.bias
__magic_name__ = roberta.model.classification_heads['''mnli'''].out_proj.weight
__magic_name__ = roberta.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
__magic_name__ = roberta.model.encoder.lm_head.dense.weight
__magic_name__ = roberta.model.encoder.lm_head.dense.bias
__magic_name__ = roberta.model.encoder.lm_head.layer_norm.weight
__magic_name__ = roberta.model.encoder.lm_head.layer_norm.bias
__magic_name__ = roberta.model.encoder.lm_head.weight
__magic_name__ = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
__magic_name__ = roberta.encode(snake_case_ ).unsqueeze(0 ) # batch of size 1
__magic_name__ = model(snake_case_ )[0]
if classification_head:
__magic_name__ = roberta.model.classification_heads['''mnli'''](roberta.extract_features(snake_case_ ) )
else:
__magic_name__ = roberta.model(snake_case_ )[0]
print(our_output.shape , their_output.shape )
__magic_name__ = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'max_absolute_diff = {max_absolute_diff}' ) # ~ 1e-7
__magic_name__ = torch.allclose(snake_case_ , snake_case_ , atol=1E-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
pathlib.Path(snake_case_ ).mkdir(parents=snake_case_ , exist_ok=snake_case_ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case_ )
if __name__ == "__main__":
a_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
a_ : List[Any] = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
) | 678 |
def _SCREAMING_SNAKE_CASE ( snake_case_ : list[list[int]] , snake_case_ : int , snake_case_ : int , snake_case_ : set ):
__magic_name__ , __magic_name__ = len(snake_case_ ), len(grid[0] )
if (
min(snake_case_ , snake_case_ ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
__magic_name__ = 0
count += depth_first_search(snake_case_ , row + 1 , snake_case_ , snake_case_ )
count += depth_first_search(snake_case_ , row - 1 , snake_case_ , snake_case_ )
count += depth_first_search(snake_case_ , snake_case_ , col + 1 , snake_case_ )
count += depth_first_search(snake_case_ , snake_case_ , col - 1 , snake_case_ )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 678 | 1 |
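# Worked example for the counter above, calling it by the name its own
# recursion uses: a 2x2 grid of open cells has exactly two simple paths from
# (0, 0) to (1, 1), right-then-down and down-then-right.
assert depth_first_search([[0, 0], [0, 0]], 0, 0, set()) == 2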
a_ : List[Any] = {str(digit): digit**5 for digit in range(10)}
def _SCREAMING_SNAKE_CASE ( snake_case_ : int ):
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(snake_case_ ) )
def _SCREAMING_SNAKE_CASE ( ):
return sum(
number
for number in range(1000 , 100_0000 )
if number == digits_fifth_powers_sum(snake_case_ ) )
if __name__ == "__main__":
print(solution()) | 678 |
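# Spot check for the digit-power sum above (Project Euler problem 30): 4150
# is one of the numbers the search finds, since
#   4**5 + 1**5 + 5**5 + 0**5 == 1024 + 1 + 3125 + 0 == 4150
assert digits_fifth_powers_sum(4150) == 4150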
a_ : Dict = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
a_ : str = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 12,
'Pm': 15,
'Em': 18,
'Zm': 21,
'Ym': 24,
}
def _SCREAMING_SNAKE_CASE ( snake_case_ : float , snake_case_ : str , snake_case_ : str ):
__magic_name__ = from_type.lower().strip('''s''' )
__magic_name__ = to_type.lower().strip('''s''' )
__magic_name__ = UNIT_SYMBOL.get(snake_case_ , snake_case_ )
__magic_name__ = UNIT_SYMBOL.get(snake_case_ , snake_case_ )
if from_sanitized not in METRIC_CONVERSION:
__magic_name__ = (
f'Invalid \'from_type\' value: {from_type!r}.\n'
f'Conversion abbreviations are: {", ".join(snake_case_ )}'
)
raise ValueError(snake_case_ )
if to_sanitized not in METRIC_CONVERSION:
__magic_name__ = (
f'Invalid \'to_type\' value: {to_type!r}.\n'
f'Conversion abbreviations are: {", ".join(snake_case_ )}'
)
raise ValueError(snake_case_ )
__magic_name__ = METRIC_CONVERSION[from_sanitized]
__magic_name__ = METRIC_CONVERSION[to_sanitized]
__magic_name__ = 1
if from_exponent > to_exponent:
__magic_name__ = from_exponent - to_exponent
else:
__magic_name__ = -(to_exponent - from_exponent)
return value * pow(10 , snake_case_ )
if __name__ == "__main__":
from doctest import testmod
testmod() | 678 | 1 |
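# Quick hedged check of the exponent arithmetic above (the converter's
# original name is obfuscated in this file): 'kilometer' maps to exponent 3
# and 'meter' to exponent 0, so 1 km converts to 1 * 10**3 = 1000 m, and in
# the other direction 1 m converts to 1 * 10**-3 = 0.001 km.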
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
a_ : Optional[Any] = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , A , A , A , A=None ) -> List[Any]:
'''simple docstring'''
super().__init__(
A , question_encoder_tokenizer=A , generator_tokenizer=A , index=A , init_retrieval=A , )
__magic_name__ = None
def __A ( self , A ) -> str:
'''simple docstring'''
logger.info('''initializing retrieval''' )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info('''dist initialized''' )
# needs to be set manually
__magic_name__ = self._infer_socket_ifname()
# avoid clash with the NCCL port
__magic_name__ = str(distributed_port + 1 )
__magic_name__ = dist.new_group(ranks=A , backend='''gloo''' )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info('''dist not initialized / main''' )
self.index.init_index()
# all processes wait untill the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def __A ( self ) -> str:
'''simple docstring'''
return dist.get_rank(group=self.process_group ) == 0
def __A ( self , A , A , A=torch.floataa ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = torch.empty(A , dtype=A )
dist.scatter(A , src=0 , scatter_list=A , group=self.process_group )
return target_tensor
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
__magic_name__ = next((addr for addr in addrs if addr.startswith('''e''' )) , A )
return ifname
def __A ( self , A , A ) -> Tuple[np.ndarray, List[dict]]:
'''simple docstring'''
if not dist.is_initialized():
__magic_name__ , __magic_name__ = self._main_retrieve(A , A )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(A )
# distributed training
__magic_name__ = dist.get_world_size(group=self.process_group )
# gather logic
__magic_name__ = None
if self._is_main():
__magic_name__ = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(A )]
dist.gather(torch.tensor(A ) , dst=0 , gather_list=A , group=self.process_group )
# scatter logic
__magic_name__ = question_hidden_states.shape[0]
__magic_name__ = []
__magic_name__ = []
if self._is_main():
assert len(A ) == world_size
__magic_name__ , __magic_name__ = self._main_retrieve(torch.cat(A ).numpy() , A )
__magic_name__ , __magic_name__ = torch.tensor(A ), torch.tensor(A )
__magic_name__ = self._chunk_tensor(A , A )
__magic_name__ = self._chunk_tensor(A , A )
__magic_name__ = self._scattered(A , [n_queries, n_docs] , target_type=torch.intaa )
__magic_name__ = self._scattered(A , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(A ) | 678 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
a_ : Union[str, Any] = {
'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : int = [
'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongT5EncoderModel',
'LongT5ForConditionalGeneration',
'LongT5Model',
'LongT5PreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Dict = [
'FlaxLongT5ForConditionalGeneration',
'FlaxLongT5Model',
'FlaxLongT5PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
a_ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 678 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=5_12 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> str:
'''simple docstring'''
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = seq_length
__magic_name__ = is_training
__magic_name__ = use_token_type_ids
__magic_name__ = use_labels
__magic_name__ = vocab_size
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = type_vocab_size
__magic_name__ = type_sequence_label_size
__magic_name__ = initializer_range
__magic_name__ = num_labels
__magic_name__ = num_choices
__magic_name__ = scope
__magic_name__ = self.vocab_size - 1
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ = None
if self.use_token_type_ids:
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
if self.use_labels:
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
__magic_name__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __A ( self , A , A , A , A , *A ) -> Tuple:
'''simple docstring'''
__magic_name__ = OpenAIGPTModel(config=A )
model.to(A )
model.eval()
__magic_name__ = model(A , token_type_ids=A , head_mask=A )
__magic_name__ = model(A , token_type_ids=A )
__magic_name__ = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , A , A , A , A , *A ) -> Dict:
'''simple docstring'''
__magic_name__ = OpenAIGPTLMHeadModel(A )
model.to(A )
model.eval()
__magic_name__ = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A , A , A , A , *A ) -> List[Any]:
'''simple docstring'''
__magic_name__ = OpenAIGPTDoubleHeadsModel(A )
model.to(A )
model.eval()
__magic_name__ = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A , A , A , A , *A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.num_labels
__magic_name__ = OpenAIGPTForSequenceClassification(A )
model.to(A )
model.eval()
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.prepare_config_and_inputs()
        (
            __magic_name__ ,
            __magic_name__ ,
            __magic_name__ ,
            __magic_name__ ,
            __magic_name__ ,
            __magic_name__ ,
            __magic_name__ ,
        ) = config_and_inputs
__magic_name__ = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_a = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
_a = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
_a = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def __A ( self , A , A , A , A , A ) -> List[str]:
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def __A ( self , A , A , A=False ) -> List[str]:
'''simple docstring'''
__magic_name__ = super()._prepare_for_class(A , A , return_labels=A )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
__magic_name__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=A , )
__magic_name__ = inputs_dict['''labels''']
__magic_name__ = inputs_dict['''labels''']
__magic_name__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=A , )
__magic_name__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A )
return inputs_dict
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = OpenAIGPTModelTester(self )
__magic_name__ = ConfigTester(self , config_class=A , n_embd=37 )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*A )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*A )
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*A )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*A )
@slow
def __A ( self ) -> List[str]:
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ = OpenAIGPTModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(A )
__magic_name__ = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=A ) # the president is
__magic_name__ = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
__magic_name__ = model.generate(A , do_sample=A )
self.assertListEqual(output_ids[0].tolist() , A ) | 678 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=5_12 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> str:
'''simple docstring'''
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = seq_length
__magic_name__ = is_training
__magic_name__ = use_token_type_ids
__magic_name__ = use_labels
__magic_name__ = vocab_size
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = type_vocab_size
__magic_name__ = type_sequence_label_size
__magic_name__ = initializer_range
__magic_name__ = num_labels
__magic_name__ = num_choices
__magic_name__ = scope
__magic_name__ = self.vocab_size - 1
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ = None
if self.use_token_type_ids:
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
if self.use_labels:
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
__magic_name__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __A ( self , A , A , A , A , *A ) -> Tuple:
'''simple docstring'''
__magic_name__ = OpenAIGPTModel(config=A )
model.to(A )
model.eval()
__magic_name__ = model(A , token_type_ids=A , head_mask=A )
__magic_name__ = model(A , token_type_ids=A )
__magic_name__ = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , A , A , A , A , *A ) -> Dict:
'''simple docstring'''
__magic_name__ = OpenAIGPTLMHeadModel(A )
model.to(A )
model.eval()
__magic_name__ = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A , A , A , A , *A ) -> List[Any]:
'''simple docstring'''
__magic_name__ = OpenAIGPTDoubleHeadsModel(A )
model.to(A )
model.eval()
__magic_name__ = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A , A , A , A , *A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.num_labels
__magic_name__ = OpenAIGPTForSequenceClassification(A )
model.to(A )
model.eval()
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.prepare_config_and_inputs()
        (
            __magic_name__,
            __magic_name__,
            __magic_name__,
            __magic_name__,
            __magic_name__,
            __magic_name__,
            __magic_name__,
        ) = config_and_inputs
__magic_name__ = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_a = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
_a = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
_a = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def __A ( self , A , A , A , A , A ) -> List[str]:
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def __A ( self , A , A , A=False ) -> List[str]:
'''simple docstring'''
__magic_name__ = super()._prepare_for_class(A , A , return_labels=A )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
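                # The double-heads model trains two objectives at once, so its labels are
                # multiple-choice shaped: LM labels of shape (batch, num_choices, seq_len),
                # plus per-choice token indices and one classification label per example.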
__magic_name__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=A , )
__magic_name__ = inputs_dict['''labels''']
__magic_name__ = inputs_dict['''labels''']
__magic_name__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=A , )
__magic_name__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A )
return inputs_dict
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = OpenAIGPTModelTester(self )
__magic_name__ = ConfigTester(self , config_class=A , n_embd=37 )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*A )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*A )
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*A )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*A )
@slow
def __A ( self ) -> List[str]:
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ = OpenAIGPTModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(A )
__magic_name__ = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=A ) # the president is
__magic_name__ = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
            40_477,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
__magic_name__ = model.generate(A , do_sample=A )
self.assertListEqual(output_ids[0].tolist() , A ) | 678 | 1 |
from functools import reduce
a_ : Optional[int] = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def _SCREAMING_SNAKE_CASE ( snake_case_ : str = N ):
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda snake_case_ , snake_case_ : str(int(snake_case_ ) * int(snake_case_ ) ) , n[i : i + 13] ) )
for i in range(len(snake_case_ ) - 12 ) )
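# A minimal loop-based sketch of the same scan (ours, not part of the original
# solution; `_window_product_demo` is a hypothetical name): `reduce` above
# multiplies the digits of each width-13 window, and the answer is the maximum.
def _window_product_demo(digits: str = "3675356291", width: int = 4) -> int:
    best = 0
    for i in range(len(digits) - width + 1):
        prod = 1
        for ch in digits[i : i + width]:
            prod *= int(ch)
        best = max(best, prod)  # keep the largest window product seen so far
    return best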
if __name__ == "__main__":
print(F"""{solution() = }""") | 678 |
def _SCREAMING_SNAKE_CASE ( ):
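    # Append the integers 1, 2, 3, ... and join them to obtain the digits of
    # Champernowne's constant 0.123456789101112...; the answer is the product
    # d(1) * d(10) * ... * d(1_000_000), read off by index below.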
__magic_name__ = []
__magic_name__ = 1
while len(snake_case_ ) < 1E6:
constant.append(str(snake_case_ ) )
i += 1
__magic_name__ = ''''''.join(snake_case_ )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
print(solution()) | 678 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
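# Standard lazy-module pattern: the names declared in the import structure are
# only imported on first attribute access, via the `_LazyModule` created below.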
a_ : Union[str, Any] = {
'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : int = [
'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongT5EncoderModel',
'LongT5ForConditionalGeneration',
'LongT5Model',
'LongT5PreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Dict = [
'FlaxLongT5ForConditionalGeneration',
'FlaxLongT5Model',
'FlaxLongT5PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
a_ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 678 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
a_ : str = True
except ImportError:
a_ : Optional[int] = False
try:
from torch.hub import _get_torch_home
a_ : Optional[Any] = _get_torch_home()
except ImportError:
a_ : List[Any] = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
)
a_ : Any = os.path.join(torch_cache_home, 'transformers')
a_ : Any = 'https://cdn.huggingface.co'
a_ : Any = 'https://s3.amazonaws.com/models.huggingface.co/bert'
a_ : int = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
a_ : Any = os.path.join(PATH, 'config.yaml')
a_ : Any = os.path.join(PATH, 'attributes.txt')
a_ : Any = os.path.join(PATH, 'objects.txt')
a_ : List[Any] = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
a_ : Any = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
a_ : Optional[int] = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
a_ : int = 'pytorch_model.bin'
a_ : Union[str, Any] = 'config.yaml'
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any]=OBJECTS , snake_case_ : str=ATTRIBUTES ):
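    # Read the Visual Genome vocabularies: one label per line; keep only the
    # text before the first comma, lower-cased and stripped.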
__magic_name__ = []
with open(snake_case_ ) as f:
for object in f.readlines():
vg_classes.append(object.split(''',''' )[0].lower().strip() )
__magic_name__ = []
with open(snake_case_ ) as f:
for object in f.readlines():
vg_attrs.append(object.split(''',''' )[0].lower().strip() )
return vg_classes, vg_attrs
def _SCREAMING_SNAKE_CASE ( snake_case_ : int ):
__magic_name__ = OrderedDict()
with open(snake_case_ , '''rb''' ) as f:
__magic_name__ = pkl.load(snake_case_ )['''model''']
for k in copy.deepcopy(list(ckp.keys() ) ):
__magic_name__ = ckp.pop(snake_case_ )
if isinstance(snake_case_ , np.ndarray ):
__magic_name__ = torch.tensor(snake_case_ )
else:
            assert isinstance(snake_case_ , torch.Tensor ), type(snake_case_ )
__magic_name__ = v
return r
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = {}
def __init__( self , A , A = "root" , A=0 ) -> List[str]:
'''simple docstring'''
__magic_name__ = name
__magic_name__ = level
__magic_name__ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__magic_name__ = copy.deepcopy(A )
__magic_name__ = copy.deepcopy(A )
if isinstance(A , A ):
__magic_name__ = Config(A , name=A , level=level + 1 )
__magic_name__ = v
setattr(self , A , A )
__magic_name__ = d
def __repr__( self ) -> Union[str, Any]:
'''simple docstring'''
return str(list((self._pointer.keys()) ) )
def __setattr__( self , A , A ) -> Tuple:
'''simple docstring'''
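        # Keys may be dotted paths like "model.hidden_size": walk the nested
        # Config levels and update both the attribute tree and the flat
        # `_pointer` dict so lookups stay consistent.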
__magic_name__ = val
__magic_name__ = val
__magic_name__ = key.split('''.''' )
__magic_name__ = len(A ) - 1
__magic_name__ = self._pointer
if len(A ) > 1:
for i, l in enumerate(A ):
if hasattr(self , A ) and isinstance(getattr(self , A ) , A ):
setattr(getattr(self , A ) , '''.'''.join(levels[i:] ) , A )
if l == last_level:
__magic_name__ = val
else:
__magic_name__ = pointer[l]
def __A ( self ) -> List[Any]:
'''simple docstring'''
return self._pointer
def __A ( self , A , A ) -> Any:
'''simple docstring'''
with open(F'{file_name}' , '''w''' ) as stream:
dump(A , A )
def __A ( self , A , A ) -> List[Any]:
'''simple docstring'''
with open(F'{file_name}' , '''w''' ) as stream:
json.dump(A , A )
@staticmethod
def __A ( A ) -> Optional[Any]:
'''simple docstring'''
with open(A ) as stream:
__magic_name__ = load(A , Loader=A )
return data
def __str__( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = ''' '''
if self._name != "root":
__magic_name__ = F'{t * (self._level-1)}{self._name}:\n'
else:
__magic_name__ = ''''''
__magic_name__ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
            if isinstance(v , Config ):
                r += F'{t * (self._level)}{v}\n'
                self._level += 1
            else:
                r += F'{t * (self._level)}{k}: {v} ({type(v ).__name__})\n'
__magic_name__ = level
return r[:-1]
@classmethod
def __A ( cls , A , **A ) -> int:
'''simple docstring'''
__magic_name__ , __magic_name__ = cls.get_config_dict(A , **A )
return cls(A )
@classmethod
def __A ( cls , A , **A ) -> Union[str, Any]:
'''simple docstring'''
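        # Resolve the config from a local directory, an explicit file/URL, or a
        # hub-style model id, downloading and caching it when necessary, then
        # return the parsed YAML together with the remaining kwargs.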
__magic_name__ = kwargs.pop('''cache_dir''' , A )
__magic_name__ = kwargs.pop('''force_download''' , A )
__magic_name__ = kwargs.pop('''resume_download''' , A )
__magic_name__ = kwargs.pop('''proxies''' , A )
__magic_name__ = kwargs.pop('''local_files_only''' , A )
if os.path.isdir(A ):
__magic_name__ = os.path.join(A , A )
elif os.path.isfile(A ) or is_remote_url(A ):
__magic_name__ = pretrained_model_name_or_path
else:
__magic_name__ = hf_bucket_url(A , filename=A , use_cdn=A )
try:
# Load from URL or cache if already cached
__magic_name__ = cached_path(
A , cache_dir=A , force_download=A , proxies=A , resume_download=A , local_files_only=A , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__magic_name__ = Config.load_yaml(A )
except EnvironmentError:
__magic_name__ = '''Can\'t load config for'''
raise EnvironmentError(A )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(A ), kwargs
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple ):
__magic_name__ = torch.load('''dump.pt''' , map_location=in_tensor.device )
__magic_name__ = in_tensor.numpy()
__magic_name__ = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ), (
f'{sum([1 for x in np.isclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %'
" element-wise mismatch"
)
raise Exception('''tensors are all good''' )
# Hugging face functions below
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
__magic_name__ = urlparse(snake_case_ )
return parsed.scheme in ("http", "https")
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str , snake_case_ : Optional[Any]=True ):
__magic_name__ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
__magic_name__ = '''/''' not in model_id
if legacy_format:
return f'{endpoint}/{model_id}-{filename}'
else:
return f'{endpoint}/{model_id}/{filename}'
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple , snake_case_ : List[str]=None , snake_case_ : Dict=0 , snake_case_ : Tuple=None , ):
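    # Stream the file to `temp_file`; the HTTP Range header lets an interrupted
    # download resume from byte offset `resume_size`.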
__magic_name__ = '''python/{}'''.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(snake_case_ , snake_case_ ):
ua += "; " + "; ".join('''{}/{}'''.format(snake_case_ , snake_case_ ) for k, v in user_agent.items() )
elif isinstance(snake_case_ , snake_case_ ):
ua += "; " + user_agent
__magic_name__ = {'''user-agent''': ua}
if resume_size > 0:
__magic_name__ = '''bytes=%d-''' % (resume_size,)
__magic_name__ = requests.get(snake_case_ , stream=snake_case_ , proxies=snake_case_ , headers=snake_case_ )
if response.status_code == 416: # Range not satisfiable
return
__magic_name__ = response.headers.get('''Content-Length''' )
__magic_name__ = resume_size + int(snake_case_ ) if content_length is not None else None
__magic_name__ = tqdm(
unit='''B''' , unit_scale=snake_case_ , total=snake_case_ , initial=snake_case_ , desc='''Downloading''' , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(snake_case_ ) )
temp_file.write(snake_case_ )
progress.close()
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Dict=None , snake_case_ : int=False , snake_case_ : List[Any]=None , snake_case_ : Tuple=10 , snake_case_ : int=False , snake_case_ : Any=None , snake_case_ : Tuple=False , ):
if cache_dir is None:
__magic_name__ = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
__magic_name__ = None
if not local_files_only:
try:
__magic_name__ = requests.head(snake_case_ , allow_redirects=snake_case_ , proxies=snake_case_ , timeout=snake_case_ )
if response.status_code == 200:
__magic_name__ = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__magic_name__ = url_to_filename(snake_case_ , snake_case_ )
# get cache path to put the file
__magic_name__ = os.path.join(snake_case_ , snake_case_ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(snake_case_ ):
return cache_path
else:
__magic_name__ = [
file
for file in fnmatch.filter(os.listdir(snake_case_ ) , filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(snake_case_ ) > 0:
return os.path.join(snake_case_ , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(snake_case_ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__magic_name__ = cache_path + '''.lock'''
with FileLock(snake_case_ ):
# If the download just completed while the lock was activated.
if os.path.exists(snake_case_ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__magic_name__ = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(snake_case_ , '''a+b''' ) as f:
yield f
__magic_name__ = _resumable_file_manager
if os.path.exists(snake_case_ ):
__magic_name__ = os.stat(snake_case_ ).st_size
else:
__magic_name__ = 0
else:
__magic_name__ = partial(tempfile.NamedTemporaryFile , dir=snake_case_ , delete=snake_case_ )
__magic_name__ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'''%s not found in cache or force_download set to True, downloading to %s''' , snake_case_ , temp_file.name , )
http_get(
snake_case_ , snake_case_ , proxies=snake_case_ , resume_size=snake_case_ , user_agent=snake_case_ , )
os.replace(temp_file.name , snake_case_ )
__magic_name__ = {'''url''': url, '''etag''': etag}
__magic_name__ = cache_path + '''.json'''
with open(snake_case_ , '''w''' ) as meta_file:
json.dump(snake_case_ , snake_case_ )
return cache_path
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : List[Any]=None ):
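    # Cache filename = sha256(url), plus "." + sha256(etag) when an ETag is
    # known, so every revision of a remote file gets its own cache entry.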
__magic_name__ = url.encode('''utf-8''' )
    __magic_name__ = sha256(snake_case_ )
__magic_name__ = url_hash.hexdigest()
if etag:
__magic_name__ = etag.encode('''utf-8''' )
        __magic_name__ = sha256(snake_case_ )
filename += "." + etag_hash.hexdigest()
if url.endswith('''.h5''' ):
filename += ".h5"
return filename
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str=None , snake_case_ : Tuple=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[Any]=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[str]=False , snake_case_ : Optional[int]=False , snake_case_ : Optional[int]=False , ):
if cache_dir is None:
__magic_name__ = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
if is_remote_url(snake_case_ ):
# URL, so get it from the cache (downloading if necessary)
__magic_name__ = get_from_cache(
snake_case_ , cache_dir=snake_case_ , force_download=snake_case_ , proxies=snake_case_ , resume_download=snake_case_ , user_agent=snake_case_ , local_files_only=snake_case_ , )
elif os.path.exists(snake_case_ ):
# File, and it exists.
__magic_name__ = url_or_filename
elif urlparse(snake_case_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(snake_case_ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(snake_case_ ) )
if extract_compressed_file:
if not is_zipfile(snake_case_ ) and not tarfile.is_tarfile(snake_case_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__magic_name__ , __magic_name__ = os.path.split(snake_case_ )
__magic_name__ = output_file.replace('''.''' , '''-''' ) + '''-extracted'''
__magic_name__ = os.path.join(snake_case_ , snake_case_ )
if os.path.isdir(snake_case_ ) and os.listdir(snake_case_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__magic_name__ = output_path + '''.lock'''
with FileLock(snake_case_ ):
shutil.rmtree(snake_case_ , ignore_errors=snake_case_ )
os.makedirs(snake_case_ )
if is_zipfile(snake_case_ ):
with ZipFile(snake_case_ , '''r''' ) as zip_file:
zip_file.extractall(snake_case_ )
zip_file.close()
elif tarfile.is_tarfile(snake_case_ ):
__magic_name__ = tarfile.open(snake_case_ )
tar_file.extractall(snake_case_ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(snake_case_ ) )
return output_path_extracted
return output_path
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : int="," ):
    assert isinstance(snake_case_ , str )
if os.path.isfile(snake_case_ ):
with open(snake_case_ ) as f:
__magic_name__ = eval(f.read() )
else:
__magic_name__ = requests.get(snake_case_ )
try:
            __magic_name__ = req.json()
except Exception:
__magic_name__ = req.content.decode()
assert data is not None, "could not connect"
try:
__magic_name__ = eval(snake_case_ )
except Exception:
__magic_name__ = data.split('''\n''' )
req.close()
return data
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] ):
__magic_name__ = requests.get(snake_case_ )
__magic_name__ = np.array(Image.open(BytesIO(response.content ) ) )
return img
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] ):
__magic_name__ = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(snake_case_ )
with open(snake_case_ , '''rb''' ) as stream:
__magic_name__ = pkl.load(snake_case_ )
__magic_name__ = weights.pop('''model''' )
__magic_name__ = {}
for k, v in model.items():
__magic_name__ = torch.from_numpy(snake_case_ )
if "running_var" in k:
__magic_name__ = torch.tensor([0] )
__magic_name__ = k.replace('''running_var''' , '''num_batches_tracked''' )
__magic_name__ = zero
return new
def _SCREAMING_SNAKE_CASE ( ):
print(f'{os.path.abspath(os.path.join(snake_case_ , os.pardir ) )}/demo.ipynb' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple="RGB" ):
    assert isinstance(snake_case_ , str )
if os.path.isfile(snake_case_ ):
        __magic_name__ = cv2.imread(snake_case_ )
else:
__magic_name__ = get_image_from_url(snake_case_ )
assert img is not None, f'could not connect to: {im}'
    __magic_name__ = cv2.cvtColor(snake_case_ , cv2.COLOR_BGR2RGB )
if input_format == "RGB":
__magic_name__ = img[:, :, ::-1]
return img
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Dict=1 ):
return (images[i : i + batch] for i in range(0 , len(snake_case_ ) , snake_case_ )) | 678 | 1 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
a_ : Dict = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , *A , **A ) -> None:
'''simple docstring'''
warnings.warn(
'''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use SegformerImageProcessor instead.''' , A , )
super().__init__(*A , **A ) | 678 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
a_ : Optional[int] = 16
a_ : int = 32
def _SCREAMING_SNAKE_CASE ( snake_case_ : Accelerator , snake_case_ : int = 16 , snake_case_ : str = "bert-base-cased" ):
__magic_name__ = AutoTokenizer.from_pretrained(snake_case_ )
__magic_name__ = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(snake_case_ : Union[str, Any] ):
# max_length=None => use the model max length (it's actually the default)
__magic_name__ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=snake_case_ , max_length=snake_case_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__magic_name__ = datasets.map(
snake_case_ , batched=snake_case_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=snake_case_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__magic_name__ = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(snake_case_ : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(snake_case_ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(snake_case_ , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
__magic_name__ = DataLoader(
tokenized_datasets['''train'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
__magic_name__ = DataLoader(
tokenized_datasets['''validation'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
return train_dataloader, eval_dataloader
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : str ):
model.eval()
__magic_name__ = 0
for step, batch in enumerate(snake_case_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__magic_name__ = model(**snake_case_ )
__magic_name__ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__magic_name__ , __magic_name__ = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(snake_case_ ) - 1:
__magic_name__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__magic_name__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=snake_case_ , references=snake_case_ , )
__magic_name__ = metric.compute()
return eval_metric["accuracy"]
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Tuple ):
# Initialize accelerator
__magic_name__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__magic_name__ = config['''lr''']
__magic_name__ = int(config['''num_epochs'''] )
__magic_name__ = int(config['''seed'''] )
__magic_name__ = int(config['''batch_size'''] )
__magic_name__ = args.model_name_or_path
set_seed(snake_case_ )
__magic_name__ , __magic_name__ = get_dataloaders(snake_case_ , snake_case_ , snake_case_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__magic_name__ = AutoModelForSequenceClassification.from_pretrained(snake_case_ , return_dict=snake_case_ )
# Instantiate optimizer
__magic_name__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__magic_name__ = optimizer_cls(params=model.parameters() , lr=snake_case_ )
if accelerator.state.deepspeed_plugin is not None:
__magic_name__ = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
__magic_name__ = 1
__magic_name__ = (len(snake_case_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__magic_name__ = get_linear_schedule_with_warmup(
optimizer=snake_case_ , num_warmup_steps=0 , num_training_steps=snake_case_ , )
else:
__magic_name__ = DummyScheduler(snake_case_ , total_num_steps=snake_case_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# We need to keep track of how many total steps we have iterated over
__magic_name__ = 0
# We also need to keep track of the stating epoch so files are named properly
__magic_name__ = 0
__magic_name__ = evaluate.load('''glue''' , '''mrpc''' )
__magic_name__ = num_epochs
if args.partial_train_epoch is not None:
__magic_name__ = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
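        # Checkpoint folders are named `epoch_<n>`; recover <n> from the path
        # so training resumes at epoch n + 1.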
__magic_name__ = args.resume_from_checkpoint.split('''epoch_''' )[1]
__magic_name__ = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
__magic_name__ = int(snake_case_ ) + 1
__magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
accelerator.print('''resumed checkpoint performance:''' , snake_case_ )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , '''r''' ) as f:
__magic_name__ = json.load(snake_case_ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
__magic_name__ = {}
for epoch in range(snake_case_ , snake_case_ ):
model.train()
for step, batch in enumerate(snake_case_ ):
__magic_name__ = model(**snake_case_ )
__magic_name__ = outputs.loss
__magic_name__ = loss / gradient_accumulation_steps
accelerator.backward(snake_case_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
__magic_name__ = f'epoch_{epoch}'
__magic_name__ = os.path.join(args.output_dir , snake_case_ )
accelerator.save_state(snake_case_ )
__magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
__magic_name__ = accuracy
__magic_name__ = lr_scheduler.get_lr()[0]
__magic_name__ = optimizer.param_groups[0]['''lr''']
__magic_name__ = epoch
__magic_name__ = overall_step
accelerator.print(f'epoch {epoch}:' , snake_case_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , '''w''' ) as f:
json.dump(snake_case_ , snake_case_ )
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=snake_case_ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=snake_case_ , )
parser.add_argument(
'''--output_dir''' , type=snake_case_ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=snake_case_ , default=snake_case_ , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--partial_train_epoch''' , type=snake_case_ , default=snake_case_ , help='''If passed, the training will stop after this number of epochs.''' , )
parser.add_argument(
'''--num_epochs''' , type=snake_case_ , default=2 , help='''Number of train epochs.''' , )
__magic_name__ = parser.parse_args()
__magic_name__ = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(snake_case_ , snake_case_ )
if __name__ == "__main__":
main() | 678 | 1 |
def _SCREAMING_SNAKE_CASE ( snake_case_ : int ):
    if snake_case_ < 0:
        raise ValueError('''Input value must be a positive integer''' )
    elif isinstance(snake_case_ , float ):
        raise TypeError('''Input value must be a \'int\' type''' )
return bin(snake_case_ ).count('''1''' )
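# An alternative sketch (ours, assuming a non-negative input; the name
# `_popcount_kernighan` is hypothetical): Kernighan's trick clears the lowest
# set bit each pass, so the loop runs once per set bit rather than once per
# binary digit.
def _popcount_kernighan(number: int) -> int:
    count = 0
    while number:
        number &= number - 1  # drop the lowest set bit
        count += 1
    return count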
if __name__ == "__main__":
import doctest
doctest.testmod() | 678 |
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
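    # Reverse only the words longer than four characters, e.g.
    # "Hey wollef sroirraw" -> "Hey fellow warriors".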
return " ".join(
''''''.join(word[::-1] ) if len(snake_case_ ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('Hey wollef sroirraw')) | 678 | 1 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = inspect.getfile(accelerate.test_utils )
__magic_name__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
__magic_name__ = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] )
__magic_name__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] )
@require_multi_gpu
def __A ( self ) -> Optional[int]:
'''simple docstring'''
print(F'Found {torch.cuda.device_count()} devices.' )
__magic_name__ = ['''torchrun''', F'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A , env=os.environ.copy() )
@require_multi_gpu
def __A ( self ) -> Any:
'''simple docstring'''
print(F'Found {torch.cuda.device_count()} devices.' )
__magic_name__ = ['''torchrun''', F'--nproc_per_node={torch.cuda.device_count()}', self.operation_file_path]
print(F'Command: {cmd}' )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A , env=os.environ.copy() )
@require_multi_gpu
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = ['''torchrun''', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A , env=os.environ.copy() )
@require_multi_gpu
def __A ( self ) -> List[Any]:
'''simple docstring'''
print(F'Found {torch.cuda.device_count()} devices, using 2 devices only' )
__magic_name__ = ['''torchrun''', F'--nproc_per_node={torch.cuda.device_count()}', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1''' ):
execute_subprocess_async(A , env=os.environ.copy() )
if __name__ == "__main__":
a_ : int = Accelerator()
a_ : List[str] = (accelerator.state.process_index + 2, 10)
a_ : List[Any] = torch.randint(0, 10, shape).to(accelerator.device)
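    # Every rank builds a tensor with a different first dimension
    # (process_index + 2), so pad_across_processes must pad each one with
    # zeros up to the global maximum of num_processes + 1 rows (at the end by
    # default, or at the front when pad_first=True).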
a_ : List[str] = ''
a_ : str = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
a_ : str = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
a_ : Any = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg) | 678 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
a_ : Any = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
a_ : int = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
a_ : List[str] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
"""simple docstring"""
def __A ( self ) -> List[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
def __A ( self , A , A , A=None , A=None , A=None , A=None , A="auto" , A=-1 , A=0.9 , A=5 , A=5_00 , A="gpt2-large" , A=-1 , A=10_24 , A=25 , A=5 , A=True , A=25 , ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = compute_mauve(
p_text=A , q_text=A , p_features=A , q_features=A , p_tokens=A , q_tokens=A , num_buckets=A , pca_max_data=A , kmeans_explained_var=A , kmeans_num_redo=A , kmeans_max_iter=A , featurize_model_name=A , device_id=A , max_text_length=A , divergence_curve_discretization_size=A , mauve_scaling_factor=A , verbose=A , seed=A , )
return out | 678 | 1 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : Tuple ):
# Initialise PyTorch model
__magic_name__ = AlbertConfig.from_json_file(snake_case_ )
print(f'Building PyTorch model from configuration: {config}' )
__magic_name__ = AlbertForPreTraining(snake_case_ )
# Load weights from tf checkpoint
load_tf_weights_in_albert(snake_case_ , snake_case_ , snake_case_ )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , snake_case_ )
if __name__ == "__main__":
a_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--albert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained ALBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ : Dict = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path) | 678 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
a_ : Tuple = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
a_ : List[str] = typing.Union[np.float64, int, float] # noqa: UP007
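# Two equivalent distance implementations follow: a vectorised NumPy version
# and a pure-Python generator version; the __main__ block benchmarks them
# against each other.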
def _SCREAMING_SNAKE_CASE ( snake_case_ : Vector , snake_case_ : Vector ):
return np.sqrt(np.sum((np.asarray(snake_case_ ) - np.asarray(snake_case_ )) ** 2 ) )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Vector , snake_case_ : Vector ):
    return sum((va - vb) ** 2 for va, vb in zip(snake_case_ , snake_case_ ) ) ** (1 / 2)
if __name__ == "__main__":
def _SCREAMING_SNAKE_CASE ( ):
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=1_0000 , globals=globals() , ) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=1_0000 , globals=globals() , ) )
benchmark() | 678 | 1 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : Dict ):
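    # Shared assertions for the parquet fixture: 4 rows and 3 columns
    # (col_1/col_2/col_3) with the caller-supplied expected dtypes.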
    assert isinstance(snake_case_ , Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : Union[str, Any] , snake_case_ : Dict ):
__magic_name__ = tmp_path / '''cache'''
__magic_name__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__magic_name__ = ParquetDatasetReader(snake_case_ , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_parquet_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Optional[int] , snake_case_ : Dict ):
__magic_name__ = tmp_path / '''cache'''
__magic_name__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ = features.copy() if features else default_expected_features
__magic_name__ = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__magic_name__ = ParquetDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
_check_parquet_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : List[Any] ):
__magic_name__ = tmp_path / '''cache'''
__magic_name__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ = ParquetDatasetReader(snake_case_ , cache_dir=snake_case_ , split=snake_case_ ).read()
_check_parquet_dataset(snake_case_ , snake_case_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : str , snake_case_ : Optional[int] ):
if issubclass(snake_case_ , snake_case_ ):
__magic_name__ = parquet_path
elif issubclass(snake_case_ , snake_case_ ):
__magic_name__ = [parquet_path]
__magic_name__ = tmp_path / '''cache'''
__magic_name__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ = ParquetDatasetReader(snake_case_ , cache_dir=snake_case_ ).read()
_check_parquet_dataset(snake_case_ , snake_case_ )
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str , snake_case_ : Union[str, Any]=("train",) ):
    assert isinstance(snake_case_ , DatasetDict )
for split in splits:
__magic_name__ = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] ):
__magic_name__ = tmp_path / '''cache'''
__magic_name__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__magic_name__ = ParquetDatasetReader(
{'''train''': parquet_path} , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_parquet_datasetdict(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , snake_case_ : List[str] , snake_case_ : Union[str, Any] ):
__magic_name__ = tmp_path / '''cache'''
__magic_name__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ = features.copy() if features else default_expected_features
__magic_name__ = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__magic_name__ = ParquetDatasetReader({'''train''': parquet_path} , features=snake_case_ , cache_dir=snake_case_ ).read()
_check_parquet_datasetdict(snake_case_ , snake_case_ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : Optional[int] ):
if split:
__magic_name__ = {split: parquet_path}
else:
__magic_name__ = '''train'''
__magic_name__ = {'''train''': parquet_path, '''test''': parquet_path}
__magic_name__ = tmp_path / '''cache'''
__magic_name__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ = ParquetDatasetReader(snake_case_ , cache_dir=snake_case_ ).read()
_check_parquet_datasetdict(snake_case_ , snake_case_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : int ):
__magic_name__ = ParquetDatasetWriter(snake_case_ , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
__magic_name__ = pq.ParquetFile(tmp_path / '''foo.parquet''' )
__magic_name__ = pf.read()
assert dataset.data.table == output_table
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Optional[Any] ):
__magic_name__ = str(shared_datadir / '''test_image_rgb.jpg''' )
__magic_name__ = {'''image''': [image_path]}
__magic_name__ = Features({'''image''': Image()} )
__magic_name__ = Dataset.from_dict(snake_case_ , features=snake_case_ )
__magic_name__ = ParquetDatasetWriter(snake_case_ , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
__magic_name__ = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
assert dataset.features == reloaded_dataset.features
__magic_name__ = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ) , streaming=snake_case_ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''' , [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , snake_case_ : Any ):
assert get_writer_batch_size(snake_case_ ) == expected | 678 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
a_ : str = logging.get_logger(__name__)
a_ : Union[str, Any] = 'https://openaipublic.azureedge.net/jukebox/models/'
a_ : List[Any] = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
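    # Translate a key from the original OpenAI Jukebox checkpoint into the
    # transformers JukeboxModel naming scheme (conv blocks, layer norms,
    # embeddings, priors).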
if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
if "conditioner_blocks.0." in key:
__magic_name__ = key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
if "prime_prior" in key:
__magic_name__ = key.replace('''prime_prior''' , '''encoder''' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
__magic_name__ = key.replace('''.emb.''' , '''.''' )
if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('''.k''' , '''.codebook''' )
if "y_emb." in key:
return key.replace('''y_emb.''' , '''metadata_embedding.''' )
if "x_emb.emb." in key:
__magic_name__ = key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
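# Rewrite VQ-VAE encoder/decoder and conditioner keys with regexes. The index of a
# block inside a down/upsample stack is recovered as 2 * layer + sublayer
# (shifted by -2 for the decoder and conditioner stacks).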
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] ):
__magic_name__ = {}
import re
__magic_name__ = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
__magic_name__ = re.compile(
r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
__magic_name__ = re.compile(
r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
__magic_name__ = re.compile(
r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_conv_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] )
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_encoder_block_conv_in.sub(snake_case_ , snake_case_ )
elif re_encoder_block_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] )
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_encoder_block_resnet.sub(snake_case_ , snake_case_ )
elif re_encoder_block_proj_out.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_proj_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
__magic_name__ = re_encoder_block_proj_out.sub(snake_case_ , snake_case_ )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_conv_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) - 2
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_decoder_block_conv_out.sub(snake_case_ , snake_case_ )
elif re_decoder_block_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) - 2
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_decoder_block_resnet.sub(snake_case_ , snake_case_ )
elif re_decoder_block_proj_in.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_proj_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
__magic_name__ = re_decoder_block_proj_in.sub(snake_case_ , snake_case_ )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_conv_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[1] ) * 2 + int(groups[2] ) - 2
__magic_name__ = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_prior_cond_conv_out.sub(snake_case_ , snake_case_ )
elif re_prior_cond_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[1] ) * 2 + int(groups[2] ) - 2
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'conditioner_blocks.upsampler.upsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_prior_cond_resnet.sub(snake_case_ , snake_case_ )
elif re_prior_cond_proj_in.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_proj_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
__magic_name__ = re_prior_cond_proj_in.sub(snake_case_ , snake_case_ )
# keep original key
else:
__magic_name__ = original_key
__magic_name__ = replace_key(snake_case_ )
if f'{key_prefix}.{key}' not in model_state_dict or key is None:
print(f'failed converting {original_key} to {key}, does not match' )
        # handle mismatched shape
elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape:
__magic_name__ = model_state_dict[f'{key_prefix}.{key}']
            print(f'{original_key} -> {key} : \nshapes {val.shape} and {value.shape} do not match' )
__magic_name__ = original_key
__magic_name__ = original_key
__magic_name__ = value
return new_dict
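# Download the raw checkpoints if needed, rename every key, then load the VQ-VAE
# and the priors into a fresh JukeboxModel.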
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict=None , snake_case_ : Any=None ):
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ):
__magic_name__ = requests.get(f'{PREFIX}{file}' , allow_redirects=snake_case_ )
os.makedirs(f'{pytorch_dump_folder_path}/' , exist_ok=snake_case_ )
open(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' , '''wb''' ).write(r.content )
__magic_name__ = MODEL_MAPPING[model_name.split('''/''' )[-1]]
__magic_name__ = JukeboxConfig.from_pretrained(snake_case_ )
__magic_name__ = JukeboxModel(snake_case_ )
__magic_name__ = []
__magic_name__ = {}
for i, dict_name in enumerate(snake_case_ ):
__magic_name__ = torch.load(f'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}' )['''model''']
__magic_name__ = {}
for k in old_dic.keys():
if k.endswith('''.b''' ):
__magic_name__ = old_dic[k]
elif k.endswith('''.w''' ):
__magic_name__ = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
__magic_name__ = old_dic[k]
else:
__magic_name__ = old_dic[k]
__magic_name__ = '''vqvae''' if i == 0 else f'priors.{3 - i}'
__magic_name__ = fix_jukebox_keys(snake_case_ , model.state_dict() , snake_case_ , snake_case_ )
weight_dict.append(snake_case_ )
__magic_name__ = weight_dict.pop(0 )
model.vqvae.load_state_dict(snake_case_ )
for i in range(len(snake_case_ ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
with open(f'{pytorch_dump_folder_path}/mapping.json' , '''w''' ) as txtfile:
json.dump(snake_case_ , snake_case_ )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case_ )
return weight_dict
if __name__ == "__main__":
a_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
a_ : int = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
a_ : Dict = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
a_ : str = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 12,
'Pm': 15,
'Em': 18,
'Zm': 21,
'Ym': 24,
}
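# Conversion is plain base-10 exponent arithmetic:
# e.g. 4 meters to kilometers is 4 * 10 ** (0 - 3) == 0.004.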
def _SCREAMING_SNAKE_CASE ( snake_case_ : float , snake_case_ : str , snake_case_ : str ):
__magic_name__ = from_type.lower().strip('''s''' )
__magic_name__ = to_type.lower().strip('''s''' )
__magic_name__ = UNIT_SYMBOL.get(snake_case_ , snake_case_ )
__magic_name__ = UNIT_SYMBOL.get(snake_case_ , snake_case_ )
if from_sanitized not in METRIC_CONVERSION:
__magic_name__ = (
f'Invalid \'from_type\' value: {from_type!r}.\n'
f'Conversion abbreviations are: {", ".join(snake_case_ )}'
)
raise ValueError(snake_case_ )
if to_sanitized not in METRIC_CONVERSION:
__magic_name__ = (
f'Invalid \'to_type\' value: {to_type!r}.\n'
f'Conversion abbreviations are: {", ".join(snake_case_ )}'
)
raise ValueError(snake_case_ )
__magic_name__ = METRIC_CONVERSION[from_sanitized]
__magic_name__ = METRIC_CONVERSION[to_sanitized]
__magic_name__ = 1
if from_exponent > to_exponent:
__magic_name__ = from_exponent - to_exponent
else:
__magic_name__ = -(to_exponent - from_exponent)
return value * pow(10 , snake_case_ )
if __name__ == "__main__":
from doctest import testmod
    testmod()
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a_ : int = logging.get_logger(__name__)
a_ : Optional[int] = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = """table-transformer"""
_a = ["""past_key_values"""]
_a = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , A=True , A=None , A=3 , A=1_00 , A=6 , A=20_48 , A=8 , A=6 , A=20_48 , A=8 , A=0.0 , A=0.0 , A=True , A="relu" , A=2_56 , A=0.1 , A=0.0 , A=0.0 , A=0.02 , A=1.0 , A=False , A="sine" , A="resnet50" , A=True , A=False , A=1 , A=5 , A=2 , A=1 , A=1 , A=5 , A=2 , A=0.1 , **A , ) -> Any:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__magic_name__ = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(A , A ):
__magic_name__ = backbone_config.get('''model_type''' )
__magic_name__ = CONFIG_MAPPING[backbone_model_type]
__magic_name__ = config_class.from_dict(A )
# set timm attributes to None
__magic_name__ , __magic_name__ , __magic_name__ = None, None, None
__magic_name__ = use_timm_backbone
__magic_name__ = backbone_config
__magic_name__ = num_channels
__magic_name__ = num_queries
__magic_name__ = d_model
__magic_name__ = encoder_ffn_dim
__magic_name__ = encoder_layers
__magic_name__ = encoder_attention_heads
__magic_name__ = decoder_ffn_dim
__magic_name__ = decoder_layers
__magic_name__ = decoder_attention_heads
__magic_name__ = dropout
__magic_name__ = attention_dropout
__magic_name__ = activation_dropout
__magic_name__ = activation_function
__magic_name__ = init_std
__magic_name__ = init_xavier_std
__magic_name__ = encoder_layerdrop
__magic_name__ = decoder_layerdrop
__magic_name__ = encoder_layers
__magic_name__ = auxiliary_loss
__magic_name__ = position_embedding_type
__magic_name__ = backbone
__magic_name__ = use_pretrained_backbone
__magic_name__ = dilation
# Hungarian matcher
__magic_name__ = class_cost
__magic_name__ = bbox_cost
__magic_name__ = giou_cost
# Loss coefficients
__magic_name__ = mask_loss_coefficient
__magic_name__ = dice_loss_coefficient
__magic_name__ = bbox_loss_coefficient
__magic_name__ = giou_loss_coefficient
__magic_name__ = eos_coefficient
super().__init__(is_encoder_decoder=A , **A )
@property
def __A ( self ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def __A ( self ) -> int:
'''simple docstring'''
return self.d_model
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = version.parse("""1.11""" )
@property
def __A ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def __A ( self ) -> float:
'''simple docstring'''
return 1E-5
@property
def __A ( self ) -> int:
'''simple docstring'''
        return 12
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any=None ):
if subparsers is not None:
__magic_name__ = subparsers.add_parser('''test''' )
else:
__magic_name__ = argparse.ArgumentParser('''Accelerate test command''' )
parser.add_argument(
'''--config_file''' , default=snake_case_ , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
parser.set_defaults(func=snake_case_ )
return parser
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
__magic_name__ = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''] )
if args.config_file is None:
__magic_name__ = script_name
else:
__magic_name__ = f'--config_file={args.config_file} {script_name}'
__magic_name__ = ['''accelerate-launch'''] + test_args.split()
__magic_name__ = execute_subprocess_async(snake_case_ , env=os.environ.copy() )
if result.returncode == 0:
print('''Test is a success! You are ready for your distributed training!''' )
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = test_command_parser()
__magic_name__ = parser.parse_args()
test_command(snake_case_ )
if __name__ == "__main__":
    main()
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] ):
# Initialise PyTorch model
__magic_name__ = LxmertConfig.from_json_file(snake_case_ )
print(f'Building PyTorch model from configuration: {config}' )
__magic_name__ = LxmertForPreTraining(snake_case_ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(snake_case_ , snake_case_ , snake_case_ )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , snake_case_ )
if __name__ == "__main__":
a_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ : Optional[Any] = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a_ : List[Any] = logging.get_logger(__name__)
a_ : Optional[Any] = {
'microsoft/focalnet-tiny': 'https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = """focalnet"""
def __init__( self , A=2_24 , A=4 , A=3 , A=96 , A=False , A=[1_92, 3_84, 7_68, 7_68] , A=[2, 2, 6, 2] , A=[2, 2, 2, 2] , A=[3, 3, 3, 3] , A="gelu" , A=4.0 , A=0.0 , A=0.1 , A=False , A=1E-4 , A=False , A=False , A=False , A=0.02 , A=1E-5 , A=32 , A=None , A=None , **A , ) -> str:
'''simple docstring'''
super().__init__(**A )
__magic_name__ = image_size
__magic_name__ = patch_size
__magic_name__ = num_channels
__magic_name__ = embed_dim
__magic_name__ = use_conv_embed
__magic_name__ = hidden_sizes
__magic_name__ = depths
__magic_name__ = focal_levels
__magic_name__ = focal_windows
__magic_name__ = hidden_act
__magic_name__ = mlp_ratio
__magic_name__ = hidden_dropout_prob
__magic_name__ = drop_path_rate
__magic_name__ = use_layerscale
__magic_name__ = layerscale_value
__magic_name__ = use_post_layernorm
__magic_name__ = use_post_layernorm_in_modulation
__magic_name__ = normalize_modulator
__magic_name__ = initializer_range
__magic_name__ = layer_norm_eps
__magic_name__ = encoder_stride
__magic_name__ = ['''stem'''] + [F'stage{idx}' for idx in range(1 , len(self.depths ) + 1 )]
__magic_name__ , __magic_name__ = get_aligned_output_features_output_indices(
            out_features=A , out_indices=A , stage_names=self.stage_names )
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
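# Render a README.md model card for each of the three allenai wmt16 checkpoints
# from a single shared template.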
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : Union[str, Any] ):
__magic_name__ = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
__magic_name__ = {
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
__magic_name__ = f'{src_lang}-{tgt_lang}'
__magic_name__ = f'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'
model_card_dir.mkdir(parents=snake_case_ , exist_ok=snake_case_ )
__magic_name__ = os.path.join(snake_case_ , '''README.md''' )
print(f'Generating {path}' )
with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(snake_case_ )
# make sure we are under the root of the project
a_ : Tuple = Path(__file__).resolve().parent.parent.parent
a_ : Dict = repo_dir / 'model_cards'
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
a_ : List[str] = model_cards_dir / 'allenai' / model_name
    write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name)
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
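# Return the first MLP linear layer of the model; the tests below use it to check
# that 4-bit loading wraps the weight in bitsandbytes' Params4bit.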
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any ):
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
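    # Minimal LoRA-style adapter: a low-rank bottleneck stacked on top of a frozen
    # linear module, used later to verify that gradients flow through 4-bit layers.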
class SCREAMING_SNAKE_CASE_ ( nn.Module ):
"""simple docstring"""
def __init__( self , A , A ) -> List[Any]:
'''simple docstring'''
super().__init__()
__magic_name__ = module
__magic_name__ = nn.Sequential(
nn.Linear(module.in_features , A , bias=A ) , nn.Linear(A , module.out_features , bias=A ) , )
__magic_name__ = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=A )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def __A ( self , A , *A , **A ) -> Optional[Any]:
'''simple docstring'''
return self.module(A , *A , **A ) + self.adapter(A )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
_a = """bigscience/bloom-1b7"""
# Constant values
_a = 2.1_0_9_6_5_9_5_5_2_6_9_2_5_7_4
_a = """Hello my name is"""
_a = set()
EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. I""" )
EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" )
EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" )
_a = 10
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = AutoTokenizer.from_pretrained(self.model_name )
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# Models and tokenizer
__magic_name__ = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='''auto''' )
__magic_name__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = self.model_abit.config
self.assertTrue(hasattr(A , '''quantization_config''' ) )
__magic_name__ = config.to_dict()
__magic_name__ = config.to_diff_dict()
__magic_name__ = config.to_json_string()
def __A ( self ) -> Optional[int]:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
__magic_name__ = self.model_fpaa.get_memory_footprint()
__magic_name__ = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__magic_name__ = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def __A ( self ) -> List[Any]:
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(A , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = self.tokenizer(self.input_text , return_tensors='''pt''' )
__magic_name__ = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = BitsAndBytesConfig()
__magic_name__ = True
__magic_name__ = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A , device_map='''auto''' )
__magic_name__ = self.tokenizer(self.input_text , return_tensors='''pt''' )
__magic_name__ = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
with self.assertRaises(A ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(A )
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = BitsAndBytesConfig()
with self.assertRaises(A ):
__magic_name__ = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A , load_in_abit=A , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
with self.assertRaises(A ):
# Tries with `str`
self.model_abit.to('''cpu''' )
with self.assertRaises(A ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0''' ) )
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__magic_name__ = self.tokenizer(self.input_text , return_tensors='''pt''' )
__magic_name__ = self.model_fpaa.to(torch.floataa )
__magic_name__ = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__magic_name__ = self.model_fpaa.to('''cpu''' )
# Check this does not throw an error
__magic_name__ = self.model_fpaa.half()
# Check this does not throw an error
__magic_name__ = self.model_fpaa.float()
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=A , device_map='''auto''' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __A ( cls ) -> List[str]:
'''simple docstring'''
__magic_name__ = '''t5-small'''
__magic_name__ = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense
__magic_name__ = AutoTokenizer.from_pretrained(cls.model_name )
__magic_name__ = '''Translate in German: Hello, my dog is cute'''
def __A ( self ) -> Optional[int]:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
from transformers import TaForConditionalGeneration
__magic_name__ = TaForConditionalGeneration._keep_in_fpaa_modules
__magic_name__ = None
# test with `t5-small`
__magic_name__ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
__magic_name__ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
__magic_name__ = model.generate(**A )
# test with `flan-t5-small`
__magic_name__ = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A , device_map='''auto''' )
__magic_name__ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
__magic_name__ = model.generate(**A )
__magic_name__ = modules
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__magic_name__ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__magic_name__ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
__magic_name__ = model.generate(**A )
# test with `flan-t5-small`
__magic_name__ = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A , device_map='''auto''' )
__magic_name__ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
__magic_name__ = model.generate(**A )
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __A ( self ) -> List[Any]:
'''simple docstring'''
super().setUp()
# model_name
__magic_name__ = '''bigscience/bloom-560m'''
__magic_name__ = '''t5-small'''
# Different types of model
__magic_name__ = AutoModel.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# Sequence classification model
__magic_name__ = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=A , device_map='''auto''' )
# CausalLM model
__magic_name__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# Seq2seq model
__magic_name__ = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=A , device_map='''auto''' )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> int:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
def __A ( self ) -> List[Any]:
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = pipeline(
'''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__magic_name__ = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __A ( self ) -> Optional[int]:
'''simple docstring'''
super().setUp()
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=A , device_map='''balanced''' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__magic_name__ = self.tokenizer(self.input_text , return_tensors='''pt''' )
# Second real batch
__magic_name__ = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = '''facebook/opt-350m'''
super().setUp()
def __A ( self ) -> int:
'''simple docstring'''
if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
return
# Step 1: freeze all parameters
__magic_name__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
__magic_name__ = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__magic_name__ = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(A ) ):
__magic_name__ = LoRALayer(module.q_proj , rank=16 )
__magic_name__ = LoRALayer(module.k_proj , rank=16 )
__magic_name__ = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
__magic_name__ = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__magic_name__ = model.forward(**A )
out.logits.norm().backward()
for module in model.modules():
if isinstance(A , A ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(A , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = """gpt2-xl"""
    _a = 3.3_1_9_1_8_5_4_8_5_4_1_5_2_1_8_7
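# Greedy activity selection: assumes the activities are already sorted by finish
# time and repeatedly picks the next activity whose start time is not earlier than
# the finish time of the previously selected one.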
def _SCREAMING_SNAKE_CASE ( snake_case_ : list[int] , snake_case_ : list[int] ):
__magic_name__ = len(snake_case_ )
print('''The following activities are selected:''' )
# The first activity is always selected
__magic_name__ = 0
print(snake_case_ , end=''',''' )
# Consider rest of the activities
for j in range(snake_case_ ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(snake_case_ , end=''',''' )
__magic_name__ = j
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ : Dict = [1, 3, 0, 5, 8, 5]
a_ : Union[str, Any] = [2, 4, 6, 7, 9, 9]
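    # With this sample data the greedy selects activities 0, 1, 3 and 4.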
    print_max_activities(start, finish)
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
a_ : Any = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
a_ : int = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
a_ : List[str] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
"""simple docstring"""
def __A ( self ) -> List[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
def __A ( self , A , A , A=None , A=None , A=None , A=None , A="auto" , A=-1 , A=0.9 , A=5 , A=5_00 , A="gpt2-large" , A=-1 , A=10_24 , A=25 , A=5 , A=True , A=25 , ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = compute_mauve(
p_text=A , q_text=A , p_features=A , q_features=A , p_tokens=A , q_tokens=A , num_buckets=A , pca_max_data=A , kmeans_explained_var=A , kmeans_num_redo=A , kmeans_max_iter=A , featurize_model_name=A , device_id=A , max_text_length=A , divergence_curve_discretization_size=A , mauve_scaling_factor=A , verbose=A , seed=A , )
        return out
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
a_ : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ : List[str] = 256
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = ["""melgan"""]
def __init__( self , A , A , A , A , A , ) -> None:
'''simple docstring'''
super().__init__()
# From MELGAN
__magic_name__ = math.log(1E-5 ) # Matches MelGAN training.
__magic_name__ = 4.0 # Largest value for most examples
__magic_name__ = 1_28
self.register_modules(
notes_encoder=A , continuous_encoder=A , decoder=A , scheduler=A , melgan=A , )
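    # Linearly rescale log-mel features between the MelGAN range
    # [min_value, max_value] and an arbitrary output range (and back, below).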
def __A ( self , A , A=(-1.0, 1.0) , A=False ) -> List[Any]:
'''simple docstring'''
__magic_name__ , __magic_name__ = output_range
if clip:
__magic_name__ = torch.clip(A , self.min_value , self.max_value )
# Scale to [0, 1].
__magic_name__ = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def __A ( self , A , A=(-1.0, 1.0) , A=False ) -> Optional[int]:
'''simple docstring'''
__magic_name__ , __magic_name__ = input_range
__magic_name__ = torch.clip(A , A , A ) if clip else outputs
# Scale to [0, 1].
__magic_name__ = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def __A ( self , A , A , A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = input_tokens > 0
__magic_name__ , __magic_name__ = self.notes_encoder(
encoder_input_tokens=A , encoder_inputs_mask=A )
__magic_name__ , __magic_name__ = self.continuous_encoder(
encoder_inputs=A , encoder_inputs_mask=A )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def __A ( self , A , A , A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = noise_time
if not torch.is_tensor(A ):
__magic_name__ = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(A ) and len(timesteps.shape ) == 0:
__magic_name__ = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__magic_name__ = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
__magic_name__ = self.decoder(
encodings_and_masks=A , decoder_input_tokens=A , decoder_noise_time=A )
return logits
@torch.no_grad()
def __call__( self , A , A = None , A = 1_00 , A = True , A = "numpy" , A = None , A = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
'''simple docstring'''
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(A )}.' )
__magic_name__ = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
__magic_name__ = np.zeros([1, 0, self.n_dims] , np.floataa )
__magic_name__ = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=A , device=self.device )
for i, encoder_input_tokens in enumerate(A ):
if i == 0:
__magic_name__ = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
__magic_name__ = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=A , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
__magic_name__ = ones
__magic_name__ = self.scale_features(
A , output_range=[-1.0, 1.0] , clip=A )
__magic_name__ = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=A , continuous_mask=A , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
__magic_name__ = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=A , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(A )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__magic_name__ = self.decode(
encodings_and_masks=A , input_tokens=A , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
__magic_name__ = self.scheduler.step(A , A , A , generator=A ).prev_sample
__magic_name__ = self.scale_to_features(A , input_range=[-1.0, 1.0] )
__magic_name__ = mel[:1]
__magic_name__ = mel.cpu().float().numpy()
__magic_name__ = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A , A )
logger.info('''Generated segment''' , A )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
if output_type == "numpy":
__magic_name__ = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
__magic_name__ = full_pred_mel
if not return_dict:
return (output,)
        return AudioPipelineOutput(audios=A )
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
a_ : Optional[Any] = logging.get_logger(__name__)
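# Turn indexed PyTorch module names such as "layers.0" into Flax-style "layers_0".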
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
__magic_name__ = r'''\w+[.]\d+'''
__magic_name__ = re.findall(snake_case_ , snake_case_ )
for pat in pats:
__magic_name__ = key.replace(snake_case_ , '''_'''.join(pat.split('''.''' ) ) )
return key
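# Rename a PyTorch parameter to its Flax equivalent and reshape where the layouts
# differ: conv kernels go from OIHW to HWIO, linear weights are transposed.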
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : str , snake_case_ : int ):
__magic_name__ = pt_tuple_key[:-1] + ('''scale''',)
if (
any('''norm''' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
__magic_name__ = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
__magic_name__ = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
__magic_name__ = pt_tuple_key[:-1] + ('''embedding''',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
__magic_name__ = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
__magic_name__ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
__magic_name__ = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight":
__magic_name__ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
__magic_name__ = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
__magic_name__ = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : List[str] , snake_case_ : List[Any]=42 ):
# Step 1: Convert pytorch tensor to numpy
__magic_name__ = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
__magic_name__ = flax_model.init_weights(PRNGKey(snake_case_ ) )
__magic_name__ = flatten_dict(snake_case_ )
__magic_name__ = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__magic_name__ = rename_key(snake_case_ )
__magic_name__ = tuple(renamed_pt_key.split('''.''' ) )
# Correctly rename weight parameters
__magic_name__ , __magic_name__ = rename_key_and_reshape_tensor(snake_case_ , snake_case_ , snake_case_ )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
f'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# also add unexpected weight so that warning is thrown
__magic_name__ = jnp.asarray(snake_case_ )
    return unflatten_dict(snake_case_ )
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
from random import randint
from tempfile import TemporaryFile
import numpy as np
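# Randomized in-place quicksort that also counts element comparisons; the partition
# step swaps a random pivot to the end and then applies the Lomuto scheme.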
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str , snake_case_ : List[str] ):
__magic_name__ = 0
if start < end:
__magic_name__ = randint(snake_case_ , snake_case_ )
__magic_name__ = a[end]
__magic_name__ = a[pivot]
__magic_name__ = temp
__magic_name__ , __magic_name__ = _in_place_partition(snake_case_ , snake_case_ , snake_case_ )
count += _in_place_quick_sort(snake_case_ , snake_case_ , p - 1 )
count += _in_place_quick_sort(snake_case_ , p + 1 , snake_case_ )
return count
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : List[str] , snake_case_ : Tuple ):
__magic_name__ = 0
__magic_name__ = randint(snake_case_ , snake_case_ )
__magic_name__ = a[end]
__magic_name__ = a[pivot]
__magic_name__ = temp
__magic_name__ = start - 1
for index in range(snake_case_ , snake_case_ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
__magic_name__ = new_pivot_index + 1
__magic_name__ = a[new_pivot_index]
__magic_name__ = a[index]
__magic_name__ = temp
__magic_name__ = a[new_pivot_index + 1]
__magic_name__ = a[end]
__magic_name__ = temp
return new_pivot_index + 1, count
a_ : Any = TemporaryFile()
a_ : Tuple = 100 # 100 elements are to be sorted
a_ , a_ : Any = 0, 1 # mean and standard deviation
a_ : List[Any] = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)
outfile.seek(0) # using the same array
a_ : Dict = np.load(outfile)
a_ : List[Any] = len(M) - 1
a_ : Optional[Any] = _in_place_quick_sort(M, 0, r)
print(
    'Number of comparisons for 100 elements sampled from a '
    'standard normal distribution is:'
)
print(z)
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
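# Build a SwinConfig matching the SimMIM base/large checkpoints (image size 192).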
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
__magic_name__ = SwinConfig(image_size=192 )
if "base" in model_name:
__magic_name__ = 6
__magic_name__ = 128
__magic_name__ = (2, 2, 18, 2)
__magic_name__ = (4, 8, 16, 32)
elif "large" in model_name:
__magic_name__ = 12
__magic_name__ = 192
__magic_name__ = (2, 2, 18, 2)
__magic_name__ = (6, 12, 24, 48)
else:
raise ValueError('''Model not supported, only supports base and large variants''' )
__magic_name__ = window_size
__magic_name__ = embed_dim
__magic_name__ = depths
__magic_name__ = num_heads
return config
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
if "encoder.mask_token" in name:
__magic_name__ = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
if "encoder.patch_embed.proj" in name:
__magic_name__ = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "encoder.patch_embed.norm" in name:
__magic_name__ = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
if "attn.proj" in name:
__magic_name__ = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
__magic_name__ = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
__magic_name__ = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
__magic_name__ = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
__magic_name__ = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
__magic_name__ = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
__magic_name__ = '''layernorm.weight'''
if name == "encoder.norm.bias":
__magic_name__ = '''layernorm.bias'''
if "decoder" in name:
pass
else:
__magic_name__ = '''swin.''' + name
return name
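# Split each attention block's fused qkv projection into the separate
# query/key/value weights and biases expected by the transformers model.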
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Any ):
for key in orig_state_dict.copy().keys():
__magic_name__ = orig_state_dict.pop(snake_case_ )
if "attn_mask" in key:
pass
elif "qkv" in key:
__magic_name__ = key.split('''.''' )
__magic_name__ = int(key_split[2] )
__magic_name__ = int(key_split[4] )
__magic_name__ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__magic_name__ = val[:dim, :]
__magic_name__ = val[
dim : dim * 2, :
]
__magic_name__ = val[-dim:, :]
else:
__magic_name__ = val[
:dim
]
__magic_name__ = val[
dim : dim * 2
]
__magic_name__ = val[
-dim:
]
else:
__magic_name__ = val
return orig_state_dict
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : int , snake_case_ : Any , snake_case_ : str ):
__magic_name__ = torch.load(snake_case_ , map_location='''cpu''' )['''model''']
__magic_name__ = get_swin_config(snake_case_ )
__magic_name__ = SwinForMaskedImageModeling(snake_case_ )
model.eval()
__magic_name__ = convert_state_dict(snake_case_ , snake_case_ )
model.load_state_dict(snake_case_ )
__magic_name__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__magic_name__ = ViTImageProcessor(size={'''height''': 192, '''width''': 192} )
__magic_name__ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw )
__magic_name__ = image_processor(images=snake_case_ , return_tensors='''pt''' )
with torch.no_grad():
__magic_name__ = model(**snake_case_ ).logits
print(outputs.keys() )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case_ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(snake_case_ )
if push_to_hub:
print(f'Pushing model and image processor for {model_name} to hub' )
model.push_to_hub(f'microsoft/{model_name}' )
image_processor.push_to_hub(f'microsoft/{model_name}' )
if __name__ == "__main__":
a_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
a_ : Optional[Any] = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub) | 678 | 1 |
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
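# Fast unit test with a tiny random UNet, plus slow integration tests that
# load the google/ddpm-cifar10-32 checkpoint.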
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@property
def __A ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = self.dummy_uncond_unet
__magic_name__ = PNDMScheduler()
__magic_name__ = PNDMPipeline(unet=A , scheduler=A )
pndm.to(A )
pndm.set_progress_bar_config(disable=A )
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = pndm(generator=A , num_inference_steps=20 , output_type='''numpy''' ).images
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = pndm(generator=A , num_inference_steps=20 , output_type='''numpy''' , return_dict=A )[0]
__magic_name__ = image[0, -3:, -3:, -1]
__magic_name__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__magic_name__ = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = '''google/ddpm-cifar10-32'''
__magic_name__ = UNetaDModel.from_pretrained(A )
__magic_name__ = PNDMScheduler()
__magic_name__ = PNDMPipeline(unet=A , scheduler=A )
pndm.to(A )
pndm.set_progress_bar_config(disable=A )
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = pndm(generator=A , output_type='''numpy''' ).images
__magic_name__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__magic_name__ = np.array([0.15_64, 0.1_46_45, 0.14_06, 0.1_47_15, 0.1_24_25, 0.1_40_45, 0.1_31_15, 0.1_21_75, 0.1_25] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 | 678 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
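# Group words by their "signature" (the word's letters, sorted): two words
# are anagrams exactly when their signatures match.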
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
return "".join(sorted(snake_case_ ) )
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
return word_by_signature[signature(snake_case_ )]
a_ : str = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
a_ : Optional[Any] = sorted({word.strip().lower() for word in data.splitlines()})
a_ : List[Any] = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
a_ : Optional[Any] = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams)) | 678 | 1 |
import argparse
import json
import subprocess
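# Query the GitHub Actions API for self-hosted runner status and raise if
# any of the requested runners is offline.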
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : List[str] ):
__magic_name__ = []
__magic_name__ = (
f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
''' https://api.github.com/repos/huggingface/transformers/actions/runners'''
)
__magic_name__ = subprocess.run(snake_case_ , shell=snake_case_ , stdout=subprocess.PIPE )
__magic_name__ = output.stdout.decode('''utf-8''' )
__magic_name__ = json.loads(snake_case_ )
__magic_name__ = status['''runners''']
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(snake_case_ )
    # save the result so we can report it on Slack
with open('''offline_runners.txt''' , '''w''' ) as fp:
fp.write(json.dumps(snake_case_ ) )
if len(snake_case_ ) > 0:
__magic_name__ = '''\n'''.join([x['''name'''] for x in offline_runners] )
raise ValueError(f'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any ):
return values.split(''',''' )
a_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
a_ : int = parser.parse_args()
get_runner_status(args.target_runners, args.token) | 678 |
from __future__ import annotations
from scipy.special import comb # type: ignore
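# Each curve point is a convex combination of the control points, weighted by
# the Bernstein basis b_{i,n}(t) = comb(n, i) * (1 - t) ** (n - i) * t ** i.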
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , A ) -> Tuple:
'''simple docstring'''
__magic_name__ = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
__magic_name__ = len(A ) - 1
def __A ( self , A ) -> list[float]:
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__magic_name__ = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , A ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(A ) , 5 ) == 1
return output_values
def __A ( self , A ) -> tuple[float, float]:
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__magic_name__ = self.basis_function(A )
__magic_name__ = 0.0
__magic_name__ = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def __A ( self , A = 0.01 ) -> Tuple:
'''simple docstring'''
from matplotlib import pyplot as plt # type: ignore
__magic_name__ = [] # x coordinates of points to plot
__magic_name__ = [] # y coordinates of points to plot
__magic_name__ = 0.0
while t <= 1:
__magic_name__ = self.bezier_curve_function(A )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
__magic_name__ = [i[0] for i in self.list_of_points]
__magic_name__ = [i[1] for i in self.list_of_points]
plt.plot(
A , A , color='''blue''' , label='''Curve of Degree ''' + str(self.degree ) , )
plt.scatter(A , A , color='''red''' , label='''Control Points''' )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3 | 678 | 1 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
a_ : Optional[int] = 16
a_ : int = 32
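# Build tokenized GLUE/MRPC train and eval dataloaders for a given tokenizer
# checkpoint and batch size.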
def _SCREAMING_SNAKE_CASE ( snake_case_ : Accelerator , snake_case_ : int = 16 , snake_case_ : str = "bert-base-cased" ):
__magic_name__ = AutoTokenizer.from_pretrained(snake_case_ )
__magic_name__ = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(snake_case_ : Union[str, Any] ):
# max_length=None => use the model max length (it's actually the default)
__magic_name__ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=snake_case_ , max_length=snake_case_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__magic_name__ = datasets.map(
snake_case_ , batched=snake_case_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=snake_case_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__magic_name__ = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(snake_case_ : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(snake_case_ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(snake_case_ , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
__magic_name__ = DataLoader(
tokenized_datasets['''train'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
__magic_name__ = DataLoader(
tokenized_datasets['''validation'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
return train_dataloader, eval_dataloader
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : str ):
model.eval()
__magic_name__ = 0
for step, batch in enumerate(snake_case_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__magic_name__ = model(**snake_case_ )
__magic_name__ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__magic_name__ , __magic_name__ = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(snake_case_ ) - 1:
__magic_name__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__magic_name__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=snake_case_ , references=snake_case_ , )
__magic_name__ = metric.compute()
return eval_metric["accuracy"]
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Tuple ):
# Initialize accelerator
__magic_name__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__magic_name__ = config['''lr''']
__magic_name__ = int(config['''num_epochs'''] )
__magic_name__ = int(config['''seed'''] )
__magic_name__ = int(config['''batch_size'''] )
__magic_name__ = args.model_name_or_path
set_seed(snake_case_ )
__magic_name__ , __magic_name__ = get_dataloaders(snake_case_ , snake_case_ , snake_case_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__magic_name__ = AutoModelForSequenceClassification.from_pretrained(snake_case_ , return_dict=snake_case_ )
# Instantiate optimizer
__magic_name__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__magic_name__ = optimizer_cls(params=model.parameters() , lr=snake_case_ )
if accelerator.state.deepspeed_plugin is not None:
__magic_name__ = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
__magic_name__ = 1
__magic_name__ = (len(snake_case_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__magic_name__ = get_linear_schedule_with_warmup(
optimizer=snake_case_ , num_warmup_steps=0 , num_training_steps=snake_case_ , )
else:
__magic_name__ = DummyScheduler(snake_case_ , total_num_steps=snake_case_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# We need to keep track of how many total steps we have iterated over
__magic_name__ = 0
    # We also need to keep track of the starting epoch so files are named properly
__magic_name__ = 0
__magic_name__ = evaluate.load('''glue''' , '''mrpc''' )
__magic_name__ = num_epochs
if args.partial_train_epoch is not None:
__magic_name__ = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
__magic_name__ = args.resume_from_checkpoint.split('''epoch_''' )[1]
__magic_name__ = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
__magic_name__ = int(snake_case_ ) + 1
__magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
accelerator.print('''resumed checkpoint performance:''' , snake_case_ )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
        accelerator.print('''resumed optimizer\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , '''r''' ) as f:
__magic_name__ = json.load(snake_case_ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
__magic_name__ = {}
for epoch in range(snake_case_ , snake_case_ ):
model.train()
for step, batch in enumerate(snake_case_ ):
__magic_name__ = model(**snake_case_ )
__magic_name__ = outputs.loss
__magic_name__ = loss / gradient_accumulation_steps
accelerator.backward(snake_case_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
__magic_name__ = f'epoch_{epoch}'
__magic_name__ = os.path.join(args.output_dir , snake_case_ )
accelerator.save_state(snake_case_ )
__magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
__magic_name__ = accuracy
__magic_name__ = lr_scheduler.get_lr()[0]
__magic_name__ = optimizer.param_groups[0]['''lr''']
__magic_name__ = epoch
__magic_name__ = overall_step
accelerator.print(f'epoch {epoch}:' , snake_case_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , '''w''' ) as f:
json.dump(snake_case_ , snake_case_ )
def _SCREAMING_SNAKE_CASE ( ):
    __magic_name__ = argparse.ArgumentParser(description='''Simple example of a training script with checkpointing and resumption.''' )
parser.add_argument(
'''--model_name_or_path''' , type=snake_case_ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=snake_case_ , )
parser.add_argument(
'''--output_dir''' , type=snake_case_ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=snake_case_ , default=snake_case_ , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--partial_train_epoch''' , type=snake_case_ , default=snake_case_ , help='''If passed, the training will stop after this number of epochs.''' , )
parser.add_argument(
'''--num_epochs''' , type=snake_case_ , default=2 , help='''Number of train epochs.''' , )
__magic_name__ = parser.parse_args()
__magic_name__ = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(snake_case_ , snake_case_ )
if __name__ == "__main__":
main() | 678 |
import re
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
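    # Accepted format: prefix 0 / 94 / +94 / 0094, operator code 7x with
    # x in {0, 1, 2, 4, 5, 6, 7, 8}, an optional '-' or space, then 7 digits.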
__magic_name__ = re.compile(
r'''^(?:0|94|\+94|0{2}94)''' r'''7(0|1|2|4|5|6|7|8)''' r'''(-| |)''' r'''\d{7}$''' )
return bool(re.search(snake_case_ , snake_case_ ) )
if __name__ == "__main__":
a_ : Optional[int] = '0094702343221'
print(is_sri_lankan_phone_number(phone)) | 678 | 1 |
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
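# Convert a fairseq mBART checkpoint on disk into a Hugging Face
# MBartForConditionalGeneration model.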
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] ):
__magic_name__ = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
'''decoder.output_projection.weight''',
]
for k in ignore_keys:
state_dict.pop(snake_case_ , snake_case_ )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
__magic_name__ , __magic_name__ = emb.weight.shape
__magic_name__ = nn.Linear(snake_case_ , snake_case_ , bias=snake_case_ )
__magic_name__ = emb.weight.data
return lin_layer
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , snake_case_ : Union[str, Any]="facebook/mbart-large-en-ro" , snake_case_ : Union[str, Any]=False , snake_case_ : Any=False ):
__magic_name__ = torch.load(snake_case_ , map_location='''cpu''' )['''model''']
remove_ignore_keys_(snake_case_ )
__magic_name__ = state_dict['''encoder.embed_tokens.weight'''].shape[0]
__magic_name__ = MBartConfig.from_pretrained(snake_case_ , vocab_size=snake_case_ )
if mbart_aa and finetuned:
__magic_name__ = '''relu'''
__magic_name__ = state_dict['''decoder.embed_tokens.weight''']
__magic_name__ = MBartForConditionalGeneration(snake_case_ )
model.model.load_state_dict(snake_case_ )
if finetuned:
__magic_name__ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
a_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config',
default='facebook/mbart-large-cc25',
type=str,
help='Which huggingface architecture to use: mbart-large',
)
    parser.add_argument('--mbart_50', action='store_true', help='whether the model is an mBART-50 checkpoint')
parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
a_ : str = parser.parse_args()
a_ : Dict = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path) | 678 |
import os
import sys
import unittest
a_ : int = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
a_ : Optional[Any] = os.path.join(git_repo_path, 'src', 'diffusers')
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = find_backend(''' if not is_torch_available():''' )
self.assertEqual(A , '''torch''' )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
__magic_name__ = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' )
self.assertEqual(A , '''torch_and_transformers''' )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
__magic_name__ = find_backend(
''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' )
self.assertEqual(A , '''torch_and_transformers_and_onnx''' )
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn('''torch''' , A )
self.assertIn('''torch_and_transformers''' , A )
self.assertIn('''flax_and_transformers''' , A )
self.assertIn('''torch_and_transformers_and_onnx''' , A )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''UNet2DModel''' , objects['''torch'''] )
self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] )
self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] )
self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] )
self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] )
self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(A , '''\nCONSTANT = None\n''' )
__magic_name__ = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
A , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
__magic_name__ = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
'''
__magic_name__ = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(A , A )
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
'''
__magic_name__ = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , A ) | 678 | 1 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
a_ : Any = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = """AutoTokenizer"""
_a = ["""tokenizer"""]
_a = {
"""semantic_prompt""": 1,
"""coarse_prompt""": 2,
"""fine_prompt""": 2,
}
def __init__( self , A , A=None ) -> List[str]:
'''simple docstring'''
super().__init__(A )
__magic_name__ = speaker_embeddings
@classmethod
def __A ( cls , A , A="speaker_embeddings_path.json" , **A ) -> List[str]:
'''simple docstring'''
if speaker_embeddings_dict_path is not None:
__magic_name__ = get_file_from_repo(
A , A , subfolder=kwargs.pop('''subfolder''' , A ) , cache_dir=kwargs.pop('''cache_dir''' , A ) , force_download=kwargs.pop('''force_download''' , A ) , proxies=kwargs.pop('''proxies''' , A ) , resume_download=kwargs.pop('''resume_download''' , A ) , local_files_only=kwargs.pop('''local_files_only''' , A ) , use_auth_token=kwargs.pop('''use_auth_token''' , A ) , revision=kwargs.pop('''revision''' , A ) , )
if speaker_embeddings_path is None:
logger.warning(
                    F'`{os.path.join(A , A )}` does not exist\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
__magic_name__ = None
else:
with open(A ) as speaker_embeddings_json:
__magic_name__ = json.load(A )
else:
__magic_name__ = None
__magic_name__ = AutoTokenizer.from_pretrained(A , **A )
return cls(tokenizer=A , speaker_embeddings=A )
def __A ( self , A , A="speaker_embeddings_path.json" , A="speaker_embeddings" , A = False , **A , ) -> Dict:
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(A , A , '''v2''' ) , exist_ok=A )
__magic_name__ = {}
__magic_name__ = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
__magic_name__ = self._load_voice_preset(A )
__magic_name__ = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['''repo_or_path'''] , A , F'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=A , )
__magic_name__ = os.path.join(A , F'{prompt_key}_{key}.npy' )
__magic_name__ = tmp_dict
with open(os.path.join(A , A ) , '''w''' ) as fp:
json.dump(A , A )
super().save_pretrained(A , A , **A )
def __A ( self , A = None , **A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.speaker_embeddings[voice_preset]
__magic_name__ = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
__magic_name__ = get_file_from_repo(
self.speaker_embeddings.get('''repo_or_path''' , '''/''' ) , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , A ) , cache_dir=kwargs.pop('''cache_dir''' , A ) , force_download=kwargs.pop('''force_download''' , A ) , proxies=kwargs.pop('''proxies''' , A ) , resume_download=kwargs.pop('''resume_download''' , A ) , local_files_only=kwargs.pop('''local_files_only''' , A ) , use_auth_token=kwargs.pop('''use_auth_token''' , A ) , revision=kwargs.pop('''revision''' , A ) , )
if path is None:
raise ValueError(
                F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exist\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' )
__magic_name__ = np.load(A )
return voice_preset_dict
def __A ( self , A = None ) -> Any:
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
def __call__( self , A=None , A=None , A="pt" , A=2_56 , A=False , A=True , A=False , **A , ) -> int:
'''simple docstring'''
if voice_preset is not None and not isinstance(A , A ):
if (
isinstance(A , A )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
__magic_name__ = self._load_voice_preset(A )
else:
if isinstance(A , A ) and not voice_preset.endswith('''.npz''' ):
__magic_name__ = voice_preset + '''.npz'''
__magic_name__ = np.load(A )
if voice_preset is not None:
self._validate_voice_preset_dict(A , **A )
__magic_name__ = BatchFeature(data=A , tensor_type=A )
__magic_name__ = self.tokenizer(
A , return_tensors=A , padding='''max_length''' , max_length=A , return_attention_mask=A , return_token_type_ids=A , add_special_tokens=A , **A , )
if voice_preset is not None:
__magic_name__ = voice_preset
return encoded_text | 678 |
def _SCREAMING_SNAKE_CASE ( snake_case_ : list[list[int]] , snake_case_ : int , snake_case_ : int , snake_case_ : set ):
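    # Count the paths from (row, col) to the bottom-right cell, moving in the
    # four cardinal directions through open cells (0), never revisiting a cell
    # and never crossing walls (1).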
__magic_name__ , __magic_name__ = len(snake_case_ ), len(grid[0] )
if (
min(snake_case_ , snake_case_ ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
__magic_name__ = 0
count += depth_first_search(snake_case_ , row + 1 , snake_case_ , snake_case_ )
count += depth_first_search(snake_case_ , row - 1 , snake_case_ , snake_case_ )
count += depth_first_search(snake_case_ , snake_case_ , col + 1 , snake_case_ )
count += depth_first_search(snake_case_ , snake_case_ , col - 1 , snake_case_ )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 678 | 1 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
a_ : List[Any] = logging.get_logger(__name__)
@add_end_docstrings(
SCREAMING_SNAKE_CASE__ , r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __A ( self , A ) -> np.ndarray:
'''simple docstring'''
if self.framework == "tf":
__magic_name__ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
__magic_name__ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=A )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def __A ( self , A ) -> np.ndarray:
'''simple docstring'''
__magic_name__ = self.get_masked_index(A )
__magic_name__ = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F'No mask_token ({self.tokenizer.mask_token}) found on the input' , )
def __A ( self , A ) -> Union[str, Any]:
'''simple docstring'''
if isinstance(A , A ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(A )
def __A ( self , A , A=None , **A ) -> Dict[str, GenericTensor]:
'''simple docstring'''
if return_tensors is None:
__magic_name__ = self.framework
__magic_name__ = self.tokenizer(A , return_tensors=A )
self.ensure_exactly_one_mask_token(A )
return model_inputs
def __A ( self , A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = self.model(**A )
__magic_name__ = model_inputs['''input_ids''']
return model_outputs
def __A ( self , A , A=5 , A=None ) -> Optional[int]:
'''simple docstring'''
if target_ids is not None and target_ids.shape[0] < top_k:
__magic_name__ = target_ids.shape[0]
__magic_name__ = model_outputs['''input_ids'''][0]
__magic_name__ = model_outputs['''logits''']
if self.framework == "tf":
__magic_name__ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
__magic_name__ = outputs.numpy()
__magic_name__ = outputs[0, masked_index, :]
__magic_name__ = stable_softmax(A , axis=-1 )
if target_ids is not None:
__magic_name__ = tf.gather_nd(tf.squeeze(A , 0 ) , target_ids.reshape(-1 , 1 ) )
__magic_name__ = tf.expand_dims(A , 0 )
__magic_name__ = tf.math.top_k(A , k=A )
__magic_name__ , __magic_name__ = topk.values.numpy(), topk.indices.numpy()
else:
__magic_name__ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=A ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
__magic_name__ = outputs[0, masked_index, :]
__magic_name__ = logits.softmax(dim=-1 )
if target_ids is not None:
__magic_name__ = probs[..., target_ids]
__magic_name__ , __magic_name__ = probs.topk(A )
__magic_name__ = []
__magic_name__ = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
__magic_name__ = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
__magic_name__ = input_ids.numpy().copy()
if target_ids is not None:
__magic_name__ = target_ids[p].tolist()
__magic_name__ = p
# Filter padding out:
__magic_name__ = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
__magic_name__ = self.tokenizer.decode(A , skip_special_tokens=A )
__magic_name__ = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(A )
result.append(A )
if single_mask:
return result[0]
return result
def __A ( self , A , A=None ) -> int:
'''simple docstring'''
if isinstance(A , A ):
__magic_name__ = [targets]
try:
__magic_name__ = self.tokenizer.get_vocab()
except Exception:
__magic_name__ = {}
__magic_name__ = []
for target in targets:
__magic_name__ = vocab.get(A , A )
if id_ is None:
__magic_name__ = self.tokenizer(
A , add_special_tokens=A , return_attention_mask=A , return_token_type_ids=A , max_length=1 , truncation=A , )['''input_ids''']
if len(A ) == 0:
logger.warning(
F'The specified target token `{target}` does not exist in the model vocabulary. '
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
__magic_name__ = input_ids[0]
                # XXX: If users hit this code path, tokenizing targets becomes
                # pretty slow; this warning lets them fix the input for faster runs.
logger.warning(
F'The specified target token `{target}` does not exist in the model vocabulary. '
F'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.' )
target_ids.append(id_ )
__magic_name__ = list(set(A ) )
if len(A ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
__magic_name__ = np.array(A )
return target_ids
def __A ( self , A=None , A=None ) -> List[Any]:
'''simple docstring'''
__magic_name__ = {}
if targets is not None:
__magic_name__ = self.get_target_ids(A , A )
__magic_name__ = target_ids
if top_k is not None:
__magic_name__ = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self , A , *A , **A ) -> Any:
'''simple docstring'''
__magic_name__ = super().__call__(A , **A )
if isinstance(A , A ) and len(A ) == 1:
return outputs[0]
return outputs | 678 |
a_ : Dict = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
a_ : str = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 12,
'Pm': 15,
'Em': 18,
'Zm': 21,
'Ym': 24,
}
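# Converting between units is a power-of-ten shift:
# value_in_to_units = value * 10 ** (from_exponent - to_exponent),
# e.g. 4 km -> m gives 4 * 10 ** (3 - 0) = 4000 m.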
def _SCREAMING_SNAKE_CASE ( snake_case_ : float , snake_case_ : str , snake_case_ : str ):
__magic_name__ = from_type.lower().strip('''s''' )
__magic_name__ = to_type.lower().strip('''s''' )
__magic_name__ = UNIT_SYMBOL.get(snake_case_ , snake_case_ )
__magic_name__ = UNIT_SYMBOL.get(snake_case_ , snake_case_ )
if from_sanitized not in METRIC_CONVERSION:
__magic_name__ = (
f'Invalid \'from_type\' value: {from_type!r}.\n'
f'Conversion abbreviations are: {", ".join(snake_case_ )}'
)
raise ValueError(snake_case_ )
if to_sanitized not in METRIC_CONVERSION:
__magic_name__ = (
f'Invalid \'to_type\' value: {to_type!r}.\n'
f'Conversion abbreviations are: {", ".join(snake_case_ )}'
)
raise ValueError(snake_case_ )
__magic_name__ = METRIC_CONVERSION[from_sanitized]
__magic_name__ = METRIC_CONVERSION[to_sanitized]
    __magic_name__ = from_exponent - to_exponent
return value * pow(10 , snake_case_ )
if __name__ == "__main__":
from doctest import testmod
testmod() | 678 | 1 |
import heapq as hq
import math
from collections.abc import Iterator
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = str(id_ )
__magic_name__ = None
__magic_name__ = None
__magic_name__ = []
__magic_name__ = {} # {vertex:distance}
def __lt__( self , A ) -> Optional[Any]:
'''simple docstring'''
return self.key < other.key
def __repr__( self ) -> int:
'''simple docstring'''
return self.id
def __A ( self , A ) -> Union[str, Any]:
'''simple docstring'''
self.neighbors.append(A )
def __A ( self , A , A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = weight
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : int , snake_case_ : Optional[int] ):
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , snake_case_ )
graph[b - 1].add_edge(graph[a - 1] , snake_case_ )
def _SCREAMING_SNAKE_CASE ( snake_case_ : list , snake_case_ : Vertex ):
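    # List-based Prim: repeatedly extract the minimum-key vertex with min(),
    # relax its neighbors, and collect (vertex, parent) MST edges; O(V^2).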
__magic_name__ = []
for u in graph:
__magic_name__ = math.inf
__magic_name__ = None
__magic_name__ = 0
__magic_name__ = graph[:]
while q:
__magic_name__ = min(snake_case_ )
q.remove(snake_case_ )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
__magic_name__ = u
__magic_name__ = u.edges[v.id]
for i in range(1 , len(snake_case_ ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def _SCREAMING_SNAKE_CASE ( snake_case_ : list , snake_case_ : Vertex ):
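    # Heap-backed Prim variant: vertex keys are mutated in place, so the heap
    # is re-heapified after each relaxation.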
for u in graph:
__magic_name__ = math.inf
__magic_name__ = None
__magic_name__ = 0
__magic_name__ = list(snake_case_ )
hq.heapify(snake_case_ )
while h:
__magic_name__ = hq.heappop(snake_case_ )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
__magic_name__ = u
__magic_name__ = u.edges[v.id]
hq.heapify(snake_case_ )
for i in range(1 , len(snake_case_ ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def _SCREAMING_SNAKE_CASE ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod() | 678 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
a_ : Union[str, Any] = {
'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : int = [
'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongT5EncoderModel',
'LongT5ForConditionalGeneration',
'LongT5Model',
'LongT5PreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Dict = [
'FlaxLongT5ForConditionalGeneration',
'FlaxLongT5Model',
'FlaxLongT5PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
a_ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 678 | 1 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = tf.convert_to_tensor(
[
[
8.2_22_09_91, # 3rd highest value; idx. 0
-0.5_62_00_44,
5.23_22_97_52,
4.0_38_63_93,
-6.8_79_83_78,
-0.54_78_58_02,
-3.2_01_21_53,
2.92_77_71_76,
1.88_17_19_53,
7.35_34_12_76, # 5th highest value; idx. 9
8.43_20_78_33, # 2nd highest value; idx. 10
-9.85_71_18_36,
-5.96_20_92_36,
-1.13_03_91_61,
-7.1_11_52_94,
-0.8_36_96_33,
-5.3_18_64_08,
7.06_42_74_07,
0.81_36_93_44,
-0.82_02_38_17,
-5.9_17_97_96,
0.58_81_34_43,
-6.99_77_84_38,
4.71_55_11_89,
-0.18_77_16_37,
7.44_02_07_59, # 4th highest value; idx. 25
9.38_45_09_87, # 1st highest value; idx. 26
2.12_66_29_41,
-9.32_56_20_38,
2.35_65_25_22,
            ], # cumulative prob of 5 highest values <= 0.6
[
0.58_42_55_18,
4.53_13_92_38,
-5.57_51_04_64,
-6.28_03_06_99,
-7.19_52_95_03,
-4.02_12_25_51,
1.39_33_70_37,
-6.06_70_70_57,
1.59_48_05_17,
-9.64_31_19,
0.03_90_77_99,
0.67_23_17_62,
-8.88_20_67_26,
6.27_11_59_22, # 4th highest value; idx. 13
2.28_52_07_23,
4.82_76_75_06,
4.30_42_13_68,
8.8_27_53_13, # 2nd highest value; idx. 17
5.44_02_99_58, # 5th highest value; idx. 18
-4.4_73_57_94,
7.38_57_95_36, # 3rd highest value; idx. 20
-2.91_05_16_63,
2.61_94_60_77,
-2.5_67_47_62,
-9.48_95_93_02,
-4.02_92_26_45,
-1.35_41_69_18,
9.67_70_23_23, # 1st highest value; idx. 27
-5.89_47_85_53,
1.85_37_04_67,
            ], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
__magic_name__ = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
__magic_name__ = tf.convert_to_tensor(
[8.22_20_99, 7.3_53_41_26, 8.43_20_78, 7.4_40_20_75, 9.3_84_51, 6.27_11_59, 8.82_75_31, 5.4_40_29_95, 7.3_85_79_56, 9.67_70_23] , dtype=tf.floataa , ) # expected non filtered values as noted above
__magic_name__ = tf_top_k_top_p_filtering(A , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
__magic_name__ = output[output != -float('''inf''' )]
__magic_name__ = tf.cast(
tf.where(tf.not_equal(A , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(A , A , rtol=1E-12 )
tf.debugging.assert_equal(A , A )
@require_tf
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
if is_tf_available():
_a = {
"""AutoModelForCausalLM""": TFAutoModelForCausalLM,
"""AutoModelForSpeechSeq2Seq""": TFAutoModelForSpeechSeqaSeq,
"""AutoModelForSeq2SeqLM""": TFAutoModelForSeqaSeqLM,
"""AutoModelForVision2Seq""": TFAutoModelForVisionaSeq,
"""LogitsProcessorList""": TFLogitsProcessorList,
"""MinLengthLogitsProcessor""": TFMinLengthLogitsProcessor,
"""create_tensor_fn""": tf.convert_to_tensor,
"""floats_tensor""": floats_tensor,
"""return_tensors""": """tf""",
}
@slow
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__magic_name__ = 2
__magic_name__ = 2
class SCREAMING_SNAKE_CASE_ ( tf.Module ):
"""simple docstring"""
def __init__( self , A ) -> Any:
'''simple docstring'''
super(A , self ).__init__()
__magic_name__ = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=A , )
def __A ( self , A , A ) -> int:
'''simple docstring'''
__magic_name__ = self.model.generate(
input_ids=A , attention_mask=A , max_new_tokens=A , return_dict_in_generate=A , )
return {"sequences": outputs["sequences"]}
__magic_name__ = [[2, 0], [1_02, 1_03]]
__magic_name__ = [[1, 0], [1, 1]]
__magic_name__ = DummyModel(model=A )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(A , A , signatures={'''serving_default''': dummy_model.serving} )
__magic_name__ = tf.saved_model.load(A ).signatures['''serving_default''']
for batch_size in range(1 , len(A ) + 1 ):
__magic_name__ = {
'''input_ids''': tf.constant(dummy_input_ids[:batch_size] ),
'''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ),
}
__magic_name__ = serving_func(**A )['''sequences''']
__magic_name__ = test_model.generate(**A , max_new_tokens=A )
tf.debugging.assert_equal(A , A )
@slow
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__magic_name__ = 1
__magic_name__ = 2
class SCREAMING_SNAKE_CASE_ ( tf.Module ):
"""simple docstring"""
def __init__( self , A ) -> str:
'''simple docstring'''
super(A , self ).__init__()
__magic_name__ = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=A , )
def __A ( self , A , A ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = self.model.generate(
input_ids=A , attention_mask=A , max_new_tokens=A , return_dict_in_generate=A , )
return {"sequences": outputs["sequences"]}
__magic_name__ = [[2], [1_02, 1_03]]
__magic_name__ = [[1], [1, 1]]
__magic_name__ = DummyModel(model=A )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(A , A , signatures={'''serving_default''': dummy_model.serving} )
__magic_name__ = tf.saved_model.load(A ).signatures['''serving_default''']
for input_row in range(len(A ) ):
__magic_name__ = {
'''input_ids''': tf.constant([dummy_input_ids[input_row]] ),
'''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ),
}
__magic_name__ = serving_func(**A )['''sequences''']
__magic_name__ = test_model.generate(**A , max_new_tokens=A )
tf.debugging.assert_equal(A , A )
@slow
@require_tensorflow_text
def __A ( self ) -> Tuple:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=A )
class SCREAMING_SNAKE_CASE_ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
__magic_name__ = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(A , '''spiece.model''' ) , '''rb''' ).read() )
__magic_name__ = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
def __A ( self , A , *A , **A ) -> Dict:
'''simple docstring'''
__magic_name__ = self.tokenizer.tokenize(A )
__magic_name__ , __magic_name__ = text.pad_model_inputs(
A , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
__magic_name__ = self.model.generate(input_ids=A , attention_mask=A )
return self.tokenizer.detokenize(A )
__magic_name__ = CompleteSentenceTransformer()
__magic_name__ = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' )
__magic_name__ = complete_model(A )
__magic_name__ = tf.keras.Model(A , A )
keras_model.save(A )
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = {
'''do_sample''': True,
'''num_beams''': 1,
'''top_p''': 0.7,
'''top_k''': 10,
'''temperature''': 0.7,
}
__magic_name__ = 14
__magic_name__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__magic_name__ = '''Hello, my dog is cute and'''
__magic_name__ = tokenizer(A , return_tensors='''tf''' )
__magic_name__ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__magic_name__ = 6_38
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
__magic_name__ = model.generate(**A , eos_token_id=A , **A )
self.assertTrue(expectation == len(generated_tokens[0] ) )
__magic_name__ = [6_38, 1_98]
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
__magic_name__ = model.generate(**A , eos_token_id=A , **A )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
__magic_name__ = '''Hugging Face is a technology company based in New York and Paris.'''
__magic_name__ = bart_tokenizer(A , return_tensors='''tf''' ).input_ids
__magic_name__ = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
__magic_name__ = bart_model.generate(A ).numpy()
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __A ( self , A , A=None , **A ) -> Optional[int]:
'''simple docstring'''
return super().call(A , **A )
__magic_name__ = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
__magic_name__ = bart_model.generate(A , foo='''bar''' ).numpy()
self.assertTrue(np.array_equal(A , A ) )
class SCREAMING_SNAKE_CASE_ ( bart_model.model.encoder.__class__ ):
"""simple docstring"""
def __A ( self , A , **A ) -> Any:
'''simple docstring'''
return super().call(A , **A )
__magic_name__ = FakeEncoder(bart_model.config , bart_model.model.shared )
__magic_name__ = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
__magic_name__ = bart_model.generate(A ).numpy()
with self.assertRaises(A ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(A , foo='''bar''' ) | 678 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=5_12 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> str:
'''simple docstring'''
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = seq_length
__magic_name__ = is_training
__magic_name__ = use_token_type_ids
__magic_name__ = use_labels
__magic_name__ = vocab_size
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = type_vocab_size
__magic_name__ = type_sequence_label_size
__magic_name__ = initializer_range
__magic_name__ = num_labels
__magic_name__ = num_choices
__magic_name__ = scope
__magic_name__ = self.vocab_size - 1
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ = None
if self.use_token_type_ids:
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
if self.use_labels:
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
__magic_name__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __A ( self , A , A , A , A , *A ) -> Tuple:
'''simple docstring'''
__magic_name__ = OpenAIGPTModel(config=A )
model.to(A )
model.eval()
__magic_name__ = model(A , token_type_ids=A , head_mask=A )
__magic_name__ = model(A , token_type_ids=A )
__magic_name__ = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , A , A , A , A , *A ) -> Dict:
'''simple docstring'''
__magic_name__ = OpenAIGPTLMHeadModel(A )
model.to(A )
model.eval()
__magic_name__ = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A , A , A , A , *A ) -> List[Any]:
'''simple docstring'''
__magic_name__ = OpenAIGPTDoubleHeadsModel(A )
model.to(A )
model.eval()
__magic_name__ = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A , A , A , A , *A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.num_labels
__magic_name__ = OpenAIGPTForSequenceClassification(A )
model.to(A )
model.eval()
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.prepare_config_and_inputs()
(
(
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) ,
) = config_and_inputs
__magic_name__ = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_a = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
_a = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
_a = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def __A ( self , A , A , A , A , A ) -> List[str]:
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def __A ( self , A , A , A=False ) -> List[str]:
'''simple docstring'''
__magic_name__ = super()._prepare_for_class(A , A , return_labels=A )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
__magic_name__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=A , )
__magic_name__ = inputs_dict['''labels''']
__magic_name__ = inputs_dict['''labels''']
__magic_name__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=A , )
__magic_name__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A )
return inputs_dict
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = OpenAIGPTModelTester(self )
__magic_name__ = ConfigTester(self , config_class=A , n_embd=37 )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*A )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*A )
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*A )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*A )
@slow
def __A ( self ) -> List[str]:
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ = OpenAIGPTModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(A )
__magic_name__ = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=A ) # the president is
__magic_name__ = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
__magic_name__ = model.generate(A , do_sample=A )
self.assertListEqual(output_ids[0].tolist() , A ) | 678 | 1 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
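# Pre-computes per-example max token lengths for the train/val splits and caches
# them with pickle_save, so length-aware samplers can reuse them without
# re-tokenizing the whole dataset.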
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : Optional[int] , snake_case_ : Optional[Any]=1024 , snake_case_ : Optional[int]=1024 , snake_case_ : Optional[Any]=False , **snake_case_ : Dict ):
__magic_name__ = AutoTokenizer.from_pretrained(snake_case_ )
__magic_name__ = SeqaSeqDataset(snake_case_ , snake_case_ , snake_case_ , snake_case_ , type_path='''train''' , **snake_case_ )
__magic_name__ = tok.pad_token_id
def get_lens(snake_case_ : Union[str, Any] ):
__magic_name__ = tqdm(
DataLoader(snake_case_ , batch_size=512 , num_workers=8 , shuffle=snake_case_ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
__magic_name__ = []
for batch in dl:
__magic_name__ = batch['''input_ids'''].ne(snake_case_ ).sum(1 ).tolist()
__magic_name__ = batch['''labels'''].ne(snake_case_ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(snake_case_ , snake_case_ ):
max_lens.append(max(snake_case_ , snake_case_ ) )
else:
max_lens.extend(snake_case_ )
return max_lens
__magic_name__ = get_lens(snake_case_ )
__magic_name__ = SeqaSeqDataset(snake_case_ , snake_case_ , snake_case_ , snake_case_ , type_path='''val''' , **snake_case_ )
__magic_name__ = get_lens(snake_case_ )
pickle_save(snake_case_ , train_ds.len_file )
pickle_save(snake_case_ , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file) | 678 |
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = []
__magic_name__ = 1
while len(snake_case_ ) < 1E6:
constant.append(str(snake_case_ ) )
i += 1
__magic_name__ = ''''''.join(snake_case_ )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
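# Sanity check (well-known Project Euler 40 result): the product of the 1st,
# 10th, 100th, ..., 1_000_000th digits of Champernowne's constant is 210.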
if __name__ == "__main__":
print(solution()) | 678 | 1 |
from collections import defaultdict
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , A , A ) -> int:
'''simple docstring'''
__magic_name__ = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
__magic_name__ = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(A ) )
]
__magic_name__ = defaultdict(A ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
__magic_name__ = (1 << len(A )) - 1
def __A ( self , A , A ) -> Union[str, Any]:
'''simple docstring'''
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
        # Number of ways when we don't take this task in the arrangement
__magic_name__ = self.count_ways_until(A , task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
__magic_name__ = total_ways_util
return self.dp[mask][task_no]
def __A ( self , A ) -> Optional[int]:
'''simple docstring'''
for i in range(len(A ) ):
for j in task_performed[i]:
self.task[j].append(A )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
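# Complexity note: the memoised search visits O(2^M * N) (mask, task) states,
# where M is the number of persons and N the total number of tasks.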
if __name__ == "__main__":
a_ : int = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
a_ : Tuple = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
) | 678 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
a_ : str = True
except ImportError:
a_ : Optional[int] = False
try:
from torch.hub import _get_torch_home
a_ : Optional[Any] = _get_torch_home()
except ImportError:
a_ : List[Any] = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
)
a_ : Any = os.path.join(torch_cache_home, 'transformers')
a_ : Any = 'https://cdn.huggingface.co'
a_ : Any = 'https://s3.amazonaws.com/models.huggingface.co/bert'
a_ : int = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
a_ : Any = os.path.join(PATH, 'config.yaml')
a_ : Any = os.path.join(PATH, 'attributes.txt')
a_ : Any = os.path.join(PATH, 'objects.txt')
a_ : List[Any] = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
a_ : Any = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
a_ : Optional[int] = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
a_ : int = 'pytorch_model.bin'
a_ : Union[str, Any] = 'config.yaml'
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any]=OBJECTS , snake_case_ : str=ATTRIBUTES ):
__magic_name__ = []
with open(snake_case_ ) as f:
for object in f.readlines():
vg_classes.append(object.split(''',''' )[0].lower().strip() )
__magic_name__ = []
with open(snake_case_ ) as f:
for object in f.readlines():
vg_attrs.append(object.split(''',''' )[0].lower().strip() )
return vg_classes, vg_attrs
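# Loads a Detectron-style pickled checkpoint and converts every numpy array in
# its ``model`` dict to a torch tensor, keyed by the original parameter names.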
def _SCREAMING_SNAKE_CASE ( snake_case_ : int ):
__magic_name__ = OrderedDict()
with open(snake_case_ , '''rb''' ) as f:
__magic_name__ = pkl.load(snake_case_ )['''model''']
for k in copy.deepcopy(list(ckp.keys() ) ):
__magic_name__ = ckp.pop(snake_case_ )
if isinstance(snake_case_ , np.ndarray ):
__magic_name__ = torch.tensor(snake_case_ )
else:
assert isinstance(snake_case_ , torch.tensor ), type(snake_case_ )
__magic_name__ = v
return r
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = {}
def __init__( self , A , A = "root" , A=0 ) -> List[str]:
'''simple docstring'''
__magic_name__ = name
__magic_name__ = level
__magic_name__ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__magic_name__ = copy.deepcopy(A )
__magic_name__ = copy.deepcopy(A )
if isinstance(A , A ):
__magic_name__ = Config(A , name=A , level=level + 1 )
__magic_name__ = v
setattr(self , A , A )
__magic_name__ = d
def __repr__( self ) -> Union[str, Any]:
'''simple docstring'''
return str(list((self._pointer.keys()) ) )
def __setattr__( self , A , A ) -> Tuple:
'''simple docstring'''
__magic_name__ = val
__magic_name__ = val
__magic_name__ = key.split('''.''' )
__magic_name__ = len(A ) - 1
__magic_name__ = self._pointer
if len(A ) > 1:
for i, l in enumerate(A ):
if hasattr(self , A ) and isinstance(getattr(self , A ) , A ):
setattr(getattr(self , A ) , '''.'''.join(levels[i:] ) , A )
if l == last_level:
__magic_name__ = val
else:
__magic_name__ = pointer[l]
def __A ( self ) -> List[Any]:
'''simple docstring'''
return self._pointer
def __A ( self , A , A ) -> Any:
'''simple docstring'''
with open(F'{file_name}' , '''w''' ) as stream:
dump(A , A )
def __A ( self , A , A ) -> List[Any]:
'''simple docstring'''
with open(F'{file_name}' , '''w''' ) as stream:
json.dump(A , A )
@staticmethod
def __A ( A ) -> Optional[Any]:
'''simple docstring'''
with open(A ) as stream:
__magic_name__ = load(A , Loader=A )
return data
def __str__( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = ''' '''
if self._name != "root":
__magic_name__ = F'{t * (self._level-1)}{self._name}:\n'
else:
__magic_name__ = ''''''
__magic_name__ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(A , A ):
r += F'{t * (self._level)}{v}\n'
self._level += 1
else:
r += F'{t * (self._level)}{k}: {v} ({type(A ).__name__})\n'
__magic_name__ = level
return r[:-1]
@classmethod
def __A ( cls , A , **A ) -> int:
'''simple docstring'''
__magic_name__ , __magic_name__ = cls.get_config_dict(A , **A )
return cls(A )
@classmethod
def __A ( cls , A , **A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = kwargs.pop('''cache_dir''' , A )
__magic_name__ = kwargs.pop('''force_download''' , A )
__magic_name__ = kwargs.pop('''resume_download''' , A )
__magic_name__ = kwargs.pop('''proxies''' , A )
__magic_name__ = kwargs.pop('''local_files_only''' , A )
if os.path.isdir(A ):
__magic_name__ = os.path.join(A , A )
elif os.path.isfile(A ) or is_remote_url(A ):
__magic_name__ = pretrained_model_name_or_path
else:
__magic_name__ = hf_bucket_url(A , filename=A , use_cdn=A )
try:
# Load from URL or cache if already cached
__magic_name__ = cached_path(
A , cache_dir=A , force_download=A , proxies=A , resume_download=A , local_files_only=A , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__magic_name__ = Config.load_yaml(A )
except EnvironmentError:
__magic_name__ = '''Can\'t load config for'''
raise EnvironmentError(A )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(A ), kwargs
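# Debug helper: compares an in-memory tensor against one previously dumped to
# ``dump.pt`` and reports the percentage of element-wise mismatches.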
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple ):
__magic_name__ = torch.load('''dump.pt''' , map_location=in_tensor.device )
__magic_name__ = in_tensor.numpy()
__magic_name__ = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ), (
f'{sum([1 for x in np.isclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %'
" element-wise mismatch"
)
raise Exception('''tensors are all good''' )
# Hugging face functions below
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
__magic_name__ = urlparse(snake_case_ )
return parsed.scheme in ("http", "https")
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str , snake_case_ : Optional[Any]=True ):
__magic_name__ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
__magic_name__ = '''/''' not in model_id
if legacy_format:
return f'{endpoint}/{model_id}-{filename}'
else:
return f'{endpoint}/{model_id}/{filename}'
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple , snake_case_ : List[str]=None , snake_case_ : Dict=0 , snake_case_ : Tuple=None , ):
__magic_name__ = '''python/{}'''.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(snake_case_ , snake_case_ ):
ua += "; " + "; ".join('''{}/{}'''.format(snake_case_ , snake_case_ ) for k, v in user_agent.items() )
elif isinstance(snake_case_ , snake_case_ ):
ua += "; " + user_agent
__magic_name__ = {'''user-agent''': ua}
if resume_size > 0:
__magic_name__ = '''bytes=%d-''' % (resume_size,)
__magic_name__ = requests.get(snake_case_ , stream=snake_case_ , proxies=snake_case_ , headers=snake_case_ )
if response.status_code == 416: # Range not satisfiable
return
__magic_name__ = response.headers.get('''Content-Length''' )
__magic_name__ = resume_size + int(snake_case_ ) if content_length is not None else None
__magic_name__ = tqdm(
unit='''B''' , unit_scale=snake_case_ , total=snake_case_ , initial=snake_case_ , desc='''Downloading''' , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(snake_case_ ) )
temp_file.write(snake_case_ )
progress.close()
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Dict=None , snake_case_ : int=False , snake_case_ : List[Any]=None , snake_case_ : Tuple=10 , snake_case_ : int=False , snake_case_ : Any=None , snake_case_ : Tuple=False , ):
if cache_dir is None:
__magic_name__ = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
__magic_name__ = None
if not local_files_only:
try:
__magic_name__ = requests.head(snake_case_ , allow_redirects=snake_case_ , proxies=snake_case_ , timeout=snake_case_ )
if response.status_code == 200:
__magic_name__ = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__magic_name__ = url_to_filename(snake_case_ , snake_case_ )
# get cache path to put the file
__magic_name__ = os.path.join(snake_case_ , snake_case_ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(snake_case_ ):
return cache_path
else:
__magic_name__ = [
file
for file in fnmatch.filter(os.listdir(snake_case_ ) , filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(snake_case_ ) > 0:
return os.path.join(snake_case_ , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(snake_case_ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__magic_name__ = cache_path + '''.lock'''
with FileLock(snake_case_ ):
# If the download just completed while the lock was activated.
if os.path.exists(snake_case_ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__magic_name__ = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(snake_case_ , '''a+b''' ) as f:
yield f
__magic_name__ = _resumable_file_manager
if os.path.exists(snake_case_ ):
__magic_name__ = os.stat(snake_case_ ).st_size
else:
__magic_name__ = 0
else:
__magic_name__ = partial(tempfile.NamedTemporaryFile , dir=snake_case_ , delete=snake_case_ )
__magic_name__ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'''%s not found in cache or force_download set to True, downloading to %s''' , snake_case_ , temp_file.name , )
http_get(
snake_case_ , snake_case_ , proxies=snake_case_ , resume_size=snake_case_ , user_agent=snake_case_ , )
os.replace(temp_file.name , snake_case_ )
__magic_name__ = {'''url''': url, '''etag''': etag}
__magic_name__ = cache_path + '''.json'''
with open(snake_case_ , '''w''' ) as meta_file:
json.dump(snake_case_ , snake_case_ )
return cache_path
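# Derives a deterministic cache filename: sha256 of the URL, extended with the
# ETag hash when one is available (keeping an ``.h5`` suffix if present).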
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : List[Any]=None ):
__magic_name__ = url.encode('''utf-8''' )
__magic_name__ = shaaaa(snake_case_ )
__magic_name__ = url_hash.hexdigest()
if etag:
__magic_name__ = etag.encode('''utf-8''' )
__magic_name__ = shaaaa(snake_case_ )
filename += "." + etag_hash.hexdigest()
if url.endswith('''.h5''' ):
filename += ".h5"
return filename
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str=None , snake_case_ : Tuple=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[Any]=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[str]=False , snake_case_ : Optional[int]=False , snake_case_ : Optional[int]=False , ):
if cache_dir is None:
__magic_name__ = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
if is_remote_url(snake_case_ ):
# URL, so get it from the cache (downloading if necessary)
__magic_name__ = get_from_cache(
snake_case_ , cache_dir=snake_case_ , force_download=snake_case_ , proxies=snake_case_ , resume_download=snake_case_ , user_agent=snake_case_ , local_files_only=snake_case_ , )
elif os.path.exists(snake_case_ ):
# File, and it exists.
__magic_name__ = url_or_filename
elif urlparse(snake_case_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(snake_case_ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(snake_case_ ) )
if extract_compressed_file:
if not is_zipfile(snake_case_ ) and not tarfile.is_tarfile(snake_case_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__magic_name__ , __magic_name__ = os.path.split(snake_case_ )
__magic_name__ = output_file.replace('''.''' , '''-''' ) + '''-extracted'''
__magic_name__ = os.path.join(snake_case_ , snake_case_ )
if os.path.isdir(snake_case_ ) and os.listdir(snake_case_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__magic_name__ = output_path + '''.lock'''
with FileLock(snake_case_ ):
shutil.rmtree(snake_case_ , ignore_errors=snake_case_ )
os.makedirs(snake_case_ )
if is_zipfile(snake_case_ ):
with ZipFile(snake_case_ , '''r''' ) as zip_file:
zip_file.extractall(snake_case_ )
zip_file.close()
elif tarfile.is_tarfile(snake_case_ ):
__magic_name__ = tarfile.open(snake_case_ )
tar_file.extractall(snake_case_ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(snake_case_ ) )
return output_path_extracted
return output_path
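# Reads comma/newline-separated data from a local file or a URL. Note the
# content is passed through ``eval``, which assumes a trusted source.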
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : int="," ):
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
with open(snake_case_ ) as f:
__magic_name__ = eval(f.read() )
else:
__magic_name__ = requests.get(snake_case_ )
try:
            __magic_name__ = req.json()
except Exception:
__magic_name__ = req.content.decode()
assert data is not None, "could not connect"
try:
__magic_name__ = eval(snake_case_ )
except Exception:
__magic_name__ = data.split('''\n''' )
req.close()
return data
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] ):
__magic_name__ = requests.get(snake_case_ )
__magic_name__ = np.array(Image.open(BytesIO(response.content ) ) )
return img
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] ):
__magic_name__ = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(snake_case_ )
with open(snake_case_ , '''rb''' ) as stream:
__magic_name__ = pkl.load(snake_case_ )
__magic_name__ = weights.pop('''model''' )
__magic_name__ = {}
for k, v in model.items():
__magic_name__ = torch.from_numpy(snake_case_ )
if "running_var" in k:
__magic_name__ = torch.tensor([0] )
__magic_name__ = k.replace('''running_var''' , '''num_batches_tracked''' )
__magic_name__ = zero
return new
def _SCREAMING_SNAKE_CASE ( ):
print(f'{os.path.abspath(os.path.join(snake_case_ , os.pardir ) )}/demo.ipynb' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple="RGB" ):
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
__magic_name__ = cva.imread(snake_case_ )
else:
__magic_name__ = get_image_from_url(snake_case_ )
assert img is not None, f'could not connect to: {im}'
__magic_name__ = cva.cvtColor(snake_case_ , cva.COLOR_BGR2RGB )
if input_format == "RGB":
__magic_name__ = img[:, :, ::-1]
return img
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Dict=1 ):
return (images[i : i + batch] for i in range(0 , len(snake_case_ ) , snake_case_ )) | 678 | 1 |
from math import factorial
def _SCREAMING_SNAKE_CASE ( snake_case_ : int = 100 ):
return sum(int(snake_case_ ) for x in str(factorial(snake_case_ ) ) )
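# Example: solution(10) == 27, since 10! = 3628800 and 3+6+2+8+8+0+0 = 27.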
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip()))) | 678 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
a_ : Optional[int] = 16
a_ : int = 32
def _SCREAMING_SNAKE_CASE ( snake_case_ : Accelerator , snake_case_ : int = 16 , snake_case_ : str = "bert-base-cased" ):
__magic_name__ = AutoTokenizer.from_pretrained(snake_case_ )
__magic_name__ = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(snake_case_ : Union[str, Any] ):
# max_length=None => use the model max length (it's actually the default)
__magic_name__ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=snake_case_ , max_length=snake_case_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__magic_name__ = datasets.map(
snake_case_ , batched=snake_case_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=snake_case_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__magic_name__ = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(snake_case_ : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(snake_case_ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(snake_case_ , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
__magic_name__ = DataLoader(
tokenized_datasets['''train'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
__magic_name__ = DataLoader(
tokenized_datasets['''validation'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
return train_dataloader, eval_dataloader
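# Distributed-safe evaluation: gathers predictions/labels across processes and
# trims the duplicated samples that padding adds to the final batch.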
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : str ):
model.eval()
__magic_name__ = 0
for step, batch in enumerate(snake_case_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__magic_name__ = model(**snake_case_ )
__magic_name__ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__magic_name__ , __magic_name__ = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(snake_case_ ) - 1:
__magic_name__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__magic_name__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=snake_case_ , references=snake_case_ , )
__magic_name__ = metric.compute()
return eval_metric["accuracy"]
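# Training loop with per-epoch checkpointing. When resuming, it re-runs
# evaluation and asserts that accuracy and learning rates match the values
# recorded in state_<epoch>.json (the resume path only verifies and returns).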
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Tuple ):
# Initialize accelerator
__magic_name__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__magic_name__ = config['''lr''']
__magic_name__ = int(config['''num_epochs'''] )
__magic_name__ = int(config['''seed'''] )
__magic_name__ = int(config['''batch_size'''] )
__magic_name__ = args.model_name_or_path
set_seed(snake_case_ )
__magic_name__ , __magic_name__ = get_dataloaders(snake_case_ , snake_case_ , snake_case_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__magic_name__ = AutoModelForSequenceClassification.from_pretrained(snake_case_ , return_dict=snake_case_ )
# Instantiate optimizer
__magic_name__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__magic_name__ = optimizer_cls(params=model.parameters() , lr=snake_case_ )
if accelerator.state.deepspeed_plugin is not None:
__magic_name__ = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
__magic_name__ = 1
__magic_name__ = (len(snake_case_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__magic_name__ = get_linear_schedule_with_warmup(
optimizer=snake_case_ , num_warmup_steps=0 , num_training_steps=snake_case_ , )
else:
__magic_name__ = DummyScheduler(snake_case_ , total_num_steps=snake_case_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# We need to keep track of how many total steps we have iterated over
__magic_name__ = 0
# We also need to keep track of the stating epoch so files are named properly
__magic_name__ = 0
__magic_name__ = evaluate.load('''glue''' , '''mrpc''' )
__magic_name__ = num_epochs
if args.partial_train_epoch is not None:
__magic_name__ = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
__magic_name__ = args.resume_from_checkpoint.split('''epoch_''' )[1]
__magic_name__ = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
__magic_name__ = int(snake_case_ ) + 1
__magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
accelerator.print('''resumed checkpoint performance:''' , snake_case_ )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
        accelerator.print('''resumed optimizer\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , '''r''' ) as f:
__magic_name__ = json.load(snake_case_ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
__magic_name__ = {}
for epoch in range(snake_case_ , snake_case_ ):
model.train()
for step, batch in enumerate(snake_case_ ):
__magic_name__ = model(**snake_case_ )
__magic_name__ = outputs.loss
__magic_name__ = loss / gradient_accumulation_steps
accelerator.backward(snake_case_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
__magic_name__ = f'epoch_{epoch}'
__magic_name__ = os.path.join(args.output_dir , snake_case_ )
accelerator.save_state(snake_case_ )
__magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
__magic_name__ = accuracy
__magic_name__ = lr_scheduler.get_lr()[0]
__magic_name__ = optimizer.param_groups[0]['''lr''']
__magic_name__ = epoch
__magic_name__ = overall_step
accelerator.print(f'epoch {epoch}:' , snake_case_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , '''w''' ) as f:
json.dump(snake_case_ , snake_case_ )
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=snake_case_ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=snake_case_ , )
parser.add_argument(
'''--output_dir''' , type=snake_case_ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=snake_case_ , default=snake_case_ , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--partial_train_epoch''' , type=snake_case_ , default=snake_case_ , help='''If passed, the training will stop after this number of epochs.''' , )
parser.add_argument(
'''--num_epochs''' , type=snake_case_ , default=2 , help='''Number of train epochs.''' , )
__magic_name__ = parser.parse_args()
__magic_name__ = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(snake_case_ , snake_case_ )
if __name__ == "__main__":
main() | 678 | 1 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
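# Conversion recipe: build the config from its JSON file, instantiate the
# PyTorch model, copy the TF checkpoint weights in, and save the state dict.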
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] ):
# Initialise PyTorch model
__magic_name__ = LxmertConfig.from_json_file(snake_case_ )
print(f'Building PyTorch model from configuration: {config}' )
__magic_name__ = LxmertForPreTraining(snake_case_ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(snake_case_ , snake_case_ , snake_case_ )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , snake_case_ )
if __name__ == "__main__":
a_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ : Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path) | 678 |
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
return " ".join(
        ''''''.join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() )
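# Expected behaviour: only words longer than four characters are reversed, e.g.
# 'Hey wollef sroirraw' -> 'Hey fellow warriors'.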
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('Hey wollef sroirraw')) | 678 | 1 |
import qiskit
def _SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : int ):
__magic_name__ = qiskit.Aer.get_backend('''aer_simulator''' )
# Create a Quantum Circuit acting on the q register
__magic_name__ = qiskit.QuantumCircuit(snake_case_ , snake_case_ )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
__magic_name__ = qiskit.execute(snake_case_ , snake_case_ , shots=1000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(snake_case_ )
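# With no gates applied the qubit stays in |0>, so all 1000 shots should land
# in state '0' (i.e. a histogram like {'0': 1000}).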
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""") | 678 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
a_ : Any = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
a_ : int = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
a_ : List[str] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
"""simple docstring"""
def __A ( self ) -> List[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
def __A ( self , A , A , A=None , A=None , A=None , A=None , A="auto" , A=-1 , A=0.9 , A=5 , A=5_00 , A="gpt2-large" , A=-1 , A=10_24 , A=25 , A=5 , A=True , A=25 , ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = compute_mauve(
p_text=A , q_text=A , p_features=A , q_features=A , p_tokens=A , q_tokens=A , num_buckets=A , pca_max_data=A , kmeans_explained_var=A , kmeans_num_redo=A , kmeans_max_iter=A , featurize_model_name=A , device_id=A , max_text_length=A , divergence_curve_discretization_size=A , mauve_scaling_factor=A , verbose=A , seed=A , )
return out | 678 | 1 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
a_ : Tuple = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
a_ : List[str] = typing.Union[np.floataa, int, float] # noqa: UP007
def _SCREAMING_SNAKE_CASE ( snake_case_ : Vector , snake_case_ : Vector ):
return np.sqrt(np.sum((np.asarray(snake_case_ ) - np.asarray(snake_case_ )) ** 2 ) )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Vector , snake_case_ : Vector ):
    return sum((va - vb) ** 2 for va, vb in zip(snake_case_ , snake_case_ ) ) ** (1 / 2)
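# Quick self-check (classic 3-4-5 triangle); both implementations should agree:
# euclidean_distance((0, 0), (3, 4)) == euclidean_distance_no_np((0, 0), (3, 4)) == 5.0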
if __name__ == "__main__":
def _SCREAMING_SNAKE_CASE ( ):
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=1_0000 , globals=globals() , ) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=1_0000 , globals=globals() , ) )
benchmark() | 678 | 1 |
def _SCREAMING_SNAKE_CASE ( snake_case_ : int ):
if not isinstance(snake_case_ , snake_case_ ):
raise ValueError('''multiplicative_persistence() only accepts integral values''' )
if num < 0:
raise ValueError('''multiplicative_persistence() does not accept negative values''' )
__magic_name__ = 0
__magic_name__ = str(snake_case_ )
while len(snake_case_ ) != 1:
__magic_name__ = [int(snake_case_ ) for i in num_string]
__magic_name__ = 1
for i in range(0 , len(snake_case_ ) ):
total *= numbers[i]
__magic_name__ = str(snake_case_ )
steps += 1
return steps
def _SCREAMING_SNAKE_CASE ( snake_case_ : int ):
if not isinstance(snake_case_ , snake_case_ ):
raise ValueError('''additive_persistence() only accepts integral values''' )
if num < 0:
raise ValueError('''additive_persistence() does not accept negative values''' )
__magic_name__ = 0
__magic_name__ = str(snake_case_ )
while len(snake_case_ ) != 1:
__magic_name__ = [int(snake_case_ ) for i in num_string]
__magic_name__ = 0
for i in range(0 , len(snake_case_ ) ):
total += numbers[i]
__magic_name__ = str(snake_case_ )
steps += 1
return steps
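# Examples: multiplicative_persistence(39) == 3 (39 -> 27 -> 14 -> 4) and
# additive_persistence(199) == 3 (199 -> 19 -> 10 -> 1).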
if __name__ == "__main__":
import doctest
doctest.testmod() | 678 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
a_ : str = logging.get_logger(__name__)
a_ : Union[str, Any] = 'https://openaipublic.azureedge.net/jukebox/models/'
a_ : List[Any] = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
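# Each entry lists the original OpenAI checkpoint shards (VQ-VAE plus three
# priors) that make up one converted model.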
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
if "conditioner_blocks.0." in key:
__magic_name__ = key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
if "prime_prior" in key:
__magic_name__ = key.replace('''prime_prior''' , '''encoder''' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
__magic_name__ = key.replace('''.emb.''' , '''.''' )
if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('''.k''' , '''.codebook''' )
if "y_emb." in key:
return key.replace('''y_emb.''' , '''metadata_embedding.''' )
if "x_emb.emb." in key:
__magic_name__ = key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
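# Rewrites encoder/decoder/conditioner block keys via regexes, flattening the
# nested ``.model.N`` numbering into explicit block indices; any key or shape
# that fails to line up is reported rather than silently dropped.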
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] ):
__magic_name__ = {}
import re
__magic_name__ = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
__magic_name__ = re.compile(
r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
__magic_name__ = re.compile(
r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
__magic_name__ = re.compile(
r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_conv_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] )
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_encoder_block_conv_in.sub(snake_case_ , snake_case_ )
elif re_encoder_block_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] )
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_encoder_block_resnet.sub(snake_case_ , snake_case_ )
elif re_encoder_block_proj_out.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_proj_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
__magic_name__ = re_encoder_block_proj_out.sub(snake_case_ , snake_case_ )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_conv_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) - 2
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_decoder_block_conv_out.sub(snake_case_ , snake_case_ )
elif re_decoder_block_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) - 2
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_decoder_block_resnet.sub(snake_case_ , snake_case_ )
elif re_decoder_block_proj_in.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_proj_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
__magic_name__ = re_decoder_block_proj_in.sub(snake_case_ , snake_case_ )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_conv_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[1] ) * 2 + int(groups[2] ) - 2
__magic_name__ = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_prior_cond_conv_out.sub(snake_case_ , snake_case_ )
elif re_prior_cond_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[1] ) * 2 + int(groups[2] ) - 2
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'conditioner_blocks.upsampler.upsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_prior_cond_resnet.sub(snake_case_ , snake_case_ )
elif re_prior_cond_proj_in.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_proj_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
__magic_name__ = re_prior_cond_proj_in.sub(snake_case_ , snake_case_ )
# keep original key
else:
__magic_name__ = original_key
__magic_name__ = replace_key(snake_case_ )
if f'{key_prefix}.{key}' not in model_state_dict or key is None:
print(f'failed converting {original_key} to {key}, does not match' )
# handle missmatched shape
elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape:
__magic_name__ = model_state_dict[f'{key_prefix}.{key}']
            print(f'{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match' )
__magic_name__ = original_key
__magic_name__ = original_key
__magic_name__ = value
return new_dict
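# End-to-end conversion: download the original checkpoints if missing, remap
# each state dict (VQ-VAE first, then the priors), load them into JukeboxModel
# and save the converted weights plus a key-mapping JSON.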
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict=None , snake_case_ : Any=None ):
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ):
__magic_name__ = requests.get(f'{PREFIX}{file}' , allow_redirects=snake_case_ )
os.makedirs(f'{pytorch_dump_folder_path}/' , exist_ok=snake_case_ )
open(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' , '''wb''' ).write(r.content )
__magic_name__ = MODEL_MAPPING[model_name.split('''/''' )[-1]]
__magic_name__ = JukeboxConfig.from_pretrained(snake_case_ )
__magic_name__ = JukeboxModel(snake_case_ )
__magic_name__ = []
__magic_name__ = {}
for i, dict_name in enumerate(snake_case_ ):
__magic_name__ = torch.load(f'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}' )['''model''']
__magic_name__ = {}
for k in old_dic.keys():
if k.endswith('''.b''' ):
__magic_name__ = old_dic[k]
elif k.endswith('''.w''' ):
__magic_name__ = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
__magic_name__ = old_dic[k]
else:
__magic_name__ = old_dic[k]
__magic_name__ = '''vqvae''' if i == 0 else f'priors.{3 - i}'
__magic_name__ = fix_jukebox_keys(snake_case_ , model.state_dict() , snake_case_ , snake_case_ )
weight_dict.append(snake_case_ )
__magic_name__ = weight_dict.pop(0 )
model.vqvae.load_state_dict(snake_case_ )
for i in range(len(snake_case_ ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
with open(f'{pytorch_dump_folder_path}/mapping.json' , '''w''' ) as txtfile:
json.dump(snake_case_ , snake_case_ )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case_ )
return weight_dict
if __name__ == "__main__":
a_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
a_ : int = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path) | 678 | 1 |
from __future__ import annotations
a_ : Optional[Any] = []
def _SCREAMING_SNAKE_CASE ( snake_case_ : list[list[int]] , snake_case_ : int , snake_case_ : int ):
for i in range(len(snake_case_ ) ):
if board[row][i] == 1:
return False
for i in range(len(snake_case_ ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(snake_case_ , -1 , -1 ) , range(snake_case_ , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(snake_case_ , -1 , -1 ) , range(snake_case_ , len(snake_case_ ) ) ):
if board[i][j] == 1:
return False
return True
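# Backtracking solver: place one queen per row, recurse on the next row, and
# undo the placement when a branch dead-ends; every complete board is printed.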
def _SCREAMING_SNAKE_CASE ( snake_case_ : list[list[int]] , snake_case_ : int ):
if row >= len(snake_case_ ):
solution.append(snake_case_ )
printboard(snake_case_ )
print()
return True
for i in range(len(snake_case_ ) ):
if is_safe(snake_case_ , snake_case_ , snake_case_ ):
__magic_name__ = 1
solve(snake_case_ , row + 1 )
__magic_name__ = 0
return False
def _SCREAMING_SNAKE_CASE ( snake_case_ : list[list[int]] ):
for i in range(len(snake_case_ ) ):
for j in range(len(snake_case_ ) ):
if board[i][j] == 1:
print('''Q''' , end=''' ''' )
else:
print('''.''' , end=''' ''' )
print()
# n=int(input("The no. of queens"))
a_ : Any = 8
a_ : Optional[int] = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution)) | 678 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a_ : int = logging.get_logger(__name__)
a_ : Optional[int] = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = """table-transformer"""
_a = ["""past_key_values"""]
_a = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , A=True , A=None , A=3 , A=1_00 , A=6 , A=20_48 , A=8 , A=6 , A=20_48 , A=8 , A=0.0 , A=0.0 , A=True , A="relu" , A=2_56 , A=0.1 , A=0.0 , A=0.0 , A=0.02 , A=1.0 , A=False , A="sine" , A="resnet50" , A=True , A=False , A=1 , A=5 , A=2 , A=1 , A=1 , A=5 , A=2 , A=0.1 , **A , ) -> Any:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__magic_name__ = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(A , A ):
__magic_name__ = backbone_config.get('''model_type''' )
__magic_name__ = CONFIG_MAPPING[backbone_model_type]
__magic_name__ = config_class.from_dict(A )
# set timm attributes to None
__magic_name__ , __magic_name__ , __magic_name__ = None, None, None
__magic_name__ = use_timm_backbone
__magic_name__ = backbone_config
__magic_name__ = num_channels
__magic_name__ = num_queries
__magic_name__ = d_model
__magic_name__ = encoder_ffn_dim
__magic_name__ = encoder_layers
__magic_name__ = encoder_attention_heads
__magic_name__ = decoder_ffn_dim
__magic_name__ = decoder_layers
__magic_name__ = decoder_attention_heads
__magic_name__ = dropout
__magic_name__ = attention_dropout
__magic_name__ = activation_dropout
__magic_name__ = activation_function
__magic_name__ = init_std
__magic_name__ = init_xavier_std
__magic_name__ = encoder_layerdrop
__magic_name__ = decoder_layerdrop
__magic_name__ = encoder_layers
__magic_name__ = auxiliary_loss
__magic_name__ = position_embedding_type
__magic_name__ = backbone
__magic_name__ = use_pretrained_backbone
__magic_name__ = dilation
# Hungarian matcher
__magic_name__ = class_cost
__magic_name__ = bbox_cost
__magic_name__ = giou_cost
# Loss coefficients
__magic_name__ = mask_loss_coefficient
__magic_name__ = dice_loss_coefficient
__magic_name__ = bbox_loss_coefficient
__magic_name__ = giou_loss_coefficient
__magic_name__ = eos_coefficient
super().__init__(is_encoder_decoder=A , **A )
@property
def __A ( self ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def __A ( self ) -> int:
'''simple docstring'''
return self.d_model
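# ONNX export configuration: declares the dynamic axes of the pixel inputs and
# the numerical tolerance used when validating the exported graph.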
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = version.parse("""1.11""" )
@property
def __A ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def __A ( self ) -> float:
'''simple docstring'''
return 1E-5
@property
def __A ( self ) -> int:
'''simple docstring'''
return 12 | 678 | 1 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_a = GPTaTokenizer
_a = GPTaTokenizerFast
_a = True
_a = {"""add_prefix_space""": True}
_a = False
def __A ( self ) -> Tuple:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__magic_name__ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
__magic_name__ = dict(zip(A , range(len(A ) ) ) )
__magic_name__ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__magic_name__ = {'''unk_token''': '''<unk>'''}
__magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(A ) )
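# Editor's note (consistent with the toy vocab and merges written above): the
# merge rules "\u0120 l", "\u0120l o", "\u0120lo w" and "e r" make the text
# " lower" byte-encode to "\u0120lower" and segment as ["\u0120low", "er"],
# which is exactly what the full-tokenizer test below asserts.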
def __A ( self , **A ) -> str:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **A )
def __A ( self , **A ) -> int:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **A )
def __A ( self , A ) -> Any:
'''simple docstring'''
__magic_name__ = '''lower newer'''
__magic_name__ = '''lower newer'''
return input_text, output_text
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__magic_name__ = '''lower newer'''
__magic_name__ = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__magic_name__ = tokenizer.tokenize(A , add_prefix_space=A )
self.assertListEqual(A , A )
__magic_name__ = tokens + [tokenizer.unk_token]
__magic_name__ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A )
def __A ( self ) -> str:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__magic_name__ = self.get_tokenizer()
__magic_name__ = self.get_rust_tokenizer(add_prefix_space=A )
__magic_name__ = '''lower newer'''
# Testing tokenization
__magic_name__ = tokenizer.tokenize(A , add_prefix_space=A )
__magic_name__ = rust_tokenizer.tokenize(A )
self.assertListEqual(A , A )
# Testing conversion to ids without special tokens
__magic_name__ = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A )
__magic_name__ = rust_tokenizer.encode(A , add_special_tokens=A )
self.assertListEqual(A , A )
# Testing conversion to ids with special tokens
__magic_name__ = self.get_rust_tokenizer(add_prefix_space=A )
__magic_name__ = tokenizer.encode(A , add_prefix_space=A )
__magic_name__ = rust_tokenizer.encode(A )
self.assertListEqual(A , A )
# Testing the unknown token
__magic_name__ = tokens + [rust_tokenizer.unk_token]
__magic_name__ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(A ) , A )
def __A ( self , *A , **A ) -> List[Any]:
'''simple docstring'''
pass
def __A ( self , A=15 ) -> Optional[int]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__magic_name__ = self.rust_tokenizer_class.from_pretrained(A , **A )
# Simple input
__magic_name__ = '''This is a simple input'''
__magic_name__ = ['''This is a simple input 1''', '''This is a simple input 2''']
__magic_name__ = ('''This is a simple input''', '''This is a pair''')
__magic_name__ = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(A , tokenizer_r.encode , A , max_length=A , padding='''max_length''' )
# Simple input
self.assertRaises(A , tokenizer_r.encode_plus , A , max_length=A , padding='''max_length''' )
# Simple input
self.assertRaises(
A , tokenizer_r.batch_encode_plus , A , max_length=A , padding='''max_length''' , )
# Pair input
self.assertRaises(A , tokenizer_r.encode , A , max_length=A , padding='''max_length''' )
# Pair input
self.assertRaises(A , tokenizer_r.encode_plus , A , max_length=A , padding='''max_length''' )
# Pair input
self.assertRaises(
A , tokenizer_r.batch_encode_plus , A , max_length=A , padding='''max_length''' , )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
# Simple input
__magic_name__ = '''This is a simple input'''
__magic_name__ = ['''This is a simple input looooooooong''', '''This is a simple input''']
__magic_name__ = ('''This is a simple input''', '''This is a pair''')
__magic_name__ = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
__magic_name__ = tokenizer.pad_token_id
__magic_name__ = tokenizer(A , padding='''max_length''' , max_length=30 , return_tensors='''np''' )
__magic_name__ = tokenizer(A , padding=A , truncate=A , return_tensors='''np''' )
__magic_name__ = tokenizer(*A , padding='''max_length''' , max_length=60 , return_tensors='''np''' )
__magic_name__ = tokenizer(A , padding=A , truncate=A , return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = '''$$$'''
__magic_name__ = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=A , add_bos_token=A )
__magic_name__ = '''This is a simple input'''
__magic_name__ = ['''This is a simple input 1''', '''This is a simple input 2''']
__magic_name__ = tokenizer.bos_token_id
__magic_name__ = tokenizer(A )
__magic_name__ = tokenizer(A )
self.assertEqual(out_s.input_ids[0] , A )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
__magic_name__ = tokenizer.decode(out_s.input_ids )
__magic_name__ = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , A )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def __A ( self ) -> List[str]:
'''simple docstring'''
pass
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = [self.get_tokenizer(do_lower_case=A , add_bos_token=A )]
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
__magic_name__ = '''Encode this.'''
__magic_name__ = '''This one too please.'''
__magic_name__ = tokenizer.encode(A , add_special_tokens=A )
encoded_sequence += tokenizer.encode(A , add_special_tokens=A )
__magic_name__ = tokenizer.encode_plus(
A , A , add_special_tokens=A , return_special_tokens_mask=A , )
__magic_name__ = encoded_sequence_dict['''input_ids''']
__magic_name__ = encoded_sequence_dict['''special_tokens_mask''']
self.assertEqual(len(A ) , len(A ) )
__magic_name__ = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(A )
]
__magic_name__ = [x for x in filtered_sequence if x is not None]
self.assertEqual(A , A )
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=A )
__magic_name__ = '''A photo of a cat'''
__magic_name__ = tokenizer.encode(
A , )
self.assertEqual(A , [2, 2_50, 13_45, 9, 10, 47_58] )
tokenizer.save_pretrained('''test_opt''' )
__magic_name__ = AutoTokenizer.from_pretrained('''./test_opt''' )
__magic_name__ = tokenizer.encode(
A , )
self.assertEqual(A , [2, 2_50, 13_45, 9, 10, 47_58] )
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , use_slow=A )
__magic_name__ = '''A photo of a cat'''
__magic_name__ = tokenizer.encode(
A , )
# Same as above
self.assertEqual(A , [2, 2_50, 13_45, 9, 10, 47_58] )
@unittest.skip('''This test is failing because of a bug in the fast tokenizer''' )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=A )
__magic_name__ = '''bos'''
__magic_name__ = tokenizer.get_vocab()['''bos''']
__magic_name__ = '''A photo of a cat'''
__magic_name__ = tokenizer.encode(
A , )
# We changed the bos token
self.assertEqual(A , [3_19_57, 2_50, 13_45, 9, 10, 47_58] )
tokenizer.save_pretrained('''./tok''' )
__magic_name__ = AutoTokenizer.from_pretrained('''./tok''' )
self.assertTrue(tokenizer.is_fast )
__magic_name__ = tokenizer.encode(
A , )
self.assertEqual(A , [3_19_57, 2_50, 13_45, 9, 10, 47_58] )
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] ):
# Initialise PyTorch model
__magic_name__ = LxmertConfig.from_json_file(snake_case_ )
print(f'Building PyTorch model from configuration: {config}' )
__magic_name__ = LxmertForPreTraining(snake_case_ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(snake_case_ , snake_case_ , snake_case_ )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , snake_case_ )
if __name__ == "__main__":
a_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ : Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
def _SCREAMING_SNAKE_CASE ( snake_case_ : list[list[int]] , snake_case_ : int , snake_case_ : int , snake_case_ : set ):
__magic_name__ , __magic_name__ = len(snake_case_ ), len(grid[0] )
if (
min(snake_case_ , snake_case_ ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
__magic_name__ = 0
count += depth_first_search(snake_case_ , row + 1 , snake_case_ , snake_case_ )
count += depth_first_search(snake_case_ , row - 1 , snake_case_ , snake_case_ )
count += depth_first_search(snake_case_ , snake_case_ , col + 1 , snake_case_ )
count += depth_first_search(snake_case_ , snake_case_ , col - 1 , snake_case_ )
visit.remove((row, col) )
return count
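# Editor's sketch (hypothetical input, not part of the source): for an open
# 2x2 grid there are exactly two simple paths from (0, 0) to (1, 1), so the
# function above returns 2 when called with [[0, 0], [0, 0]], 0, 0, set().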
if __name__ == "__main__":
import doctest
doctest.testmod()
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : Union[str, Any] ):
__magic_name__ = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
__magic_name__ = {
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
__magic_name__ = f'{src_lang}-{tgt_lang}'
__magic_name__ = f'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'
model_card_dir.mkdir(parents=snake_case_ , exist_ok=snake_case_ )
__magic_name__ = os.path.join(snake_case_ , '''README.md''' )
print(f'Generating {path}' )
with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(snake_case_ )
# make sure we are under the root of the project
a_ : Tuple = Path(__file__).resolve().parent.parent.parent
a_ : Dict = repo_dir / 'model_cards'
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
a_ : List[str] = model_cards_dir / 'allenai' / model_name
write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name)
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ : Optional[Any] = {
'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
'processing_mgp_str': ['MgpstrProcessor'],
'tokenization_mgp_str': ['MgpstrTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[str] = [
'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
'MgpstrModel',
'MgpstrPreTrainedModel',
'MgpstrForSceneTextRecognition',
]
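# Editor's note: this module follows the library's lazy-import pattern; the
# string names collected above are only resolved into real objects on first
# attribute access through _LazyModule, keeping the package import cheap.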
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
a_ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
def _SCREAMING_SNAKE_CASE ( snake_case_ : list[int] , snake_case_ : list[int] ):
__magic_name__ = len(snake_case_ )
print('''The following activities are selected:''' )
# The first activity is always selected
__magic_name__ = 0
print(snake_case_ , end=''',''' )
# Consider rest of the activities
for j in range(snake_case_ ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(snake_case_ , end=''',''' )
__magic_name__ = j
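# Editor's trace (follows the greedy rule above): for the demo arrays below,
# start=[1, 3, 0, 5, 8, 5] and finish=[2, 4, 6, 7, 9, 9], the printed
# selection is the activity indices 0, 1, 3, 4.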
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ : Dict = [1, 3, 0, 5, 8, 5]
a_ : Union[str, Any] = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
a_ : Optional[Any] = None
a_ : int = logging.get_logger(__name__)
a_ : Tuple = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
a_ : str = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
a_ : Union[str, Any] = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
a_ : Optional[Any] = '▁'
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = AlbertTokenizer
def __init__( self , A=None , A=None , A=True , A=True , A=False , A="[CLS]" , A="[SEP]" , A="<unk>" , A="[SEP]" , A="<pad>" , A="[CLS]" , A="[MASK]" , **A , ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = (
AddedToken(A , lstrip=A , rstrip=A , normalized=A )
if isinstance(A , A )
else mask_token
)
super().__init__(
A , tokenizer_file=A , do_lower_case=A , remove_space=A , keep_accents=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , **A , )
__magic_name__ = do_lower_case
__magic_name__ = remove_space
__magic_name__ = keep_accents
__magic_name__ = vocab_file
__magic_name__ = False if not self.vocab_file else True
def __A ( self , A , A = None ) -> List[int]:
'''simple docstring'''
__magic_name__ = [self.sep_token_id]
__magic_name__ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
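# Editor's illustration (toy ids assumed, not from the source): with
# cls=[2] and sep=[3], a pair A=[10, 11], B=[20] is assembled by the method
# above as [CLS] A [SEP] B [SEP], i.e. [2, 10, 11, 3, 20, 3].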
def __A ( self , A , A = None ) -> List[int]:
'''simple docstring'''
__magic_name__ = [self.sep_token_id]
__magic_name__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self , A , A = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(A ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__magic_name__ = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ):
copyfile(self.vocab_file , A )
return (out_vocab_file,)
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
a_ : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ : List[str] = 256
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = ["""melgan"""]
def __init__( self , A , A , A , A , A , ) -> None:
'''simple docstring'''
super().__init__()
# From MELGAN
__magic_name__ = math.log(1E-5 ) # Matches MelGAN training.
__magic_name__ = 4.0 # Largest value for most examples
__magic_name__ = 1_28
self.register_modules(
notes_encoder=A , continuous_encoder=A , decoder=A , scheduler=A , melgan=A , )
def __A ( self , A , A=(-1.0, 1.0) , A=False ) -> List[Any]:
'''simple docstring'''
__magic_name__ , __magic_name__ = output_range
if clip:
__magic_name__ = torch.clip(A , self.min_value , self.max_value )
# Scale to [0, 1].
__magic_name__ = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def __A ( self , A , A=(-1.0, 1.0) , A=False ) -> Optional[int]:
'''simple docstring'''
__magic_name__ , __magic_name__ = input_range
__magic_name__ = torch.clip(A , A , A ) if clip else outputs
# Scale to [0, 1].
__magic_name__ = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
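# Editor's worked example (values follow from __init__ above): with
# min_value = log(1e-5) ~ -11.51 and max_value = 4.0, the forward scaling
# maps a feature equal to max_value to 1.0 on [0, 1] and then to max_out of
# output_range; the method above applies the exact inverse of that mapping.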
def __A ( self , A , A , A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = input_tokens > 0
__magic_name__ , __magic_name__ = self.notes_encoder(
encoder_input_tokens=A , encoder_inputs_mask=A )
__magic_name__ , __magic_name__ = self.continuous_encoder(
encoder_inputs=A , encoder_inputs_mask=A )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def __A ( self , A , A , A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = noise_time
if not torch.is_tensor(A ):
__magic_name__ = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(A ) and len(timesteps.shape ) == 0:
__magic_name__ = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__magic_name__ = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
__magic_name__ = self.decoder(
encodings_and_masks=A , decoder_input_tokens=A , decoder_noise_time=A )
return logits
@torch.no_grad()
def __call__( self , A , A = None , A = 1_00 , A = True , A = "numpy" , A = None , A = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
'''simple docstring'''
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(A )}.' )
__magic_name__ = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.float32 )
__magic_name__ = np.zeros([1, 0, self.n_dims] , np.float32 )
__magic_name__ = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=A , device=self.device )
for i, encoder_input_tokens in enumerate(A ):
if i == 0:
__magic_name__ = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
__magic_name__ = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=A , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
__magic_name__ = ones
__magic_name__ = self.scale_features(
A , output_range=[-1.0, 1.0] , clip=A )
__magic_name__ = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=A , continuous_mask=A , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
__magic_name__ = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=A , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(A )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__magic_name__ = self.decode(
encodings_and_masks=A , input_tokens=A , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
__magic_name__ = self.scheduler.step(A , A , A , generator=A ).prev_sample
__magic_name__ = self.scale_to_features(A , input_range=[-1.0, 1.0] )
__magic_name__ = mel[:1]
__magic_name__ = mel.cpu().float().numpy()
__magic_name__ = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A , A )
logger.info('''Generated segment''' , A )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
if output_type == "numpy":
__magic_name__ = self.melgan(input_features=full_pred_mel.astype(np.float32 ) )
else:
__magic_name__ = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=A )
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
a_ : Tuple = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
a_ : str = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
a_ : Union[str, Any] = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
a_ : Optional[int] = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
a_ : Any = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : int ):
for tf_name, hf_name in patterns:
__magic_name__ = k.replace(snake_case_ , snake_case_ )
return k
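# Editor's trace (hypothetical TF key, for illustration only): with
# DECODER_PATTERNS, "pegasus/decoder/layer_0/intermediate/dense/kernel"
# becomes "model.decoder.layers.0.fc1.weight" through the successive
# replaces "/" -> ".", "layer_" -> "layers.", "kernel" -> "weight",
# "pegasus" -> "model" and finally "intermediate.dense" -> "fc1".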
def _SCREAMING_SNAKE_CASE ( snake_case_ : dict , snake_case_ : dict ):
__magic_name__ = BigBirdPegasusConfig(**snake_case_ )
__magic_name__ = BigBirdPegasusForConditionalGeneration(snake_case_ )
__magic_name__ = torch_model.state_dict()
__magic_name__ = {}
# separating decoder weights
__magic_name__ = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )}
__magic_name__ = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )}
for k, v in tqdm(decoder_weights.items() , '''tf -> hf conversion''' ):
__magic_name__ = [k.endswith(snake_case_ ) for ending in KEYS_TO_IGNORE]
if any(snake_case_ ):
continue
__magic_name__ = DECODER_PATTERNS
__magic_name__ = rename_state_dict_key(snake_case_ , snake_case_ )
if new_k not in state_dict:
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
__magic_name__ = v.T
__magic_name__ = torch.from_numpy(snake_case_ )
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
for k, v in tqdm(remaining_weights.items() , '''tf -> hf conversion''' ):
__magic_name__ = [k.endswith(snake_case_ ) for ending in KEYS_TO_IGNORE]
if any(snake_case_ ):
continue
__magic_name__ = REMAINING_PATTERNS
__magic_name__ = rename_state_dict_key(snake_case_ , snake_case_ )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
__magic_name__ = v.T
__magic_name__ = torch.from_numpy(snake_case_ )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
__magic_name__ = mapping['''model.embed_positions.weight''']
__magic_name__ = mapping.pop('''model.embed_positions.weight''' )
__magic_name__ , __magic_name__ = torch_model.load_state_dict(snake_case_ , strict=snake_case_ )
__magic_name__ = [
k
for k in missing
if k
not in [
'''final_logits_bias''',
'''model.encoder.embed_tokens.weight''',
'''model.decoder.embed_tokens.weight''',
'''lm_head.weight''',
]
]
assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], f'no matches found for the following tf keys {extra}'
return torch_model
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any ):
__magic_name__ = tf.train.list_variables(snake_case_ )
__magic_name__ = {}
__magic_name__ = ['''global_step''']
for name, shape in tqdm(snake_case_ , desc='''converting tf checkpoint to dict''' ):
__magic_name__ = any(pat in name for pat in ignore_name )
if skip_key:
continue
__magic_name__ = tf.train.load_variable(snake_case_ , snake_case_ )
__magic_name__ = array
return tf_weights
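# Editor's note (shape of the result, illustrative): the returned dict maps
# raw checkpoint names to numpy arrays, e.g. {"pegasus/decoder/.../kernel":
# ndarray, ...}, with every variable whose name contains "global_step" skipped.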
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str , snake_case_ : dict ):
__magic_name__ = get_tf_weights_as_numpy(snake_case_ )
__magic_name__ = convert_bigbird_pegasus(snake_case_ , snake_case_ )
torch_model.save_pretrained(snake_case_ )
if __name__ == "__main__":
a_ : Any = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
a_ : Dict = parser.parse_args()
a_ : int = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
a_ : Dict = trt.Logger(trt.Logger.WARNING)
a_ : int = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
a_ : Optional[int] = logging.getLogger(__name__)
a_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--preprocessing_num_workers', type=int, default=4, help='Number of worker processes to use for dataset preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
a_ : str = parser.parse_args()
if args.tokenizer_name:
a_ : int = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
a_ : Tuple = args.per_device_eval_batch_size
a_ : Optional[Any] = (args.per_device_eval_batch_size, args.max_seq_length)
# TRT Engine properties
a_ : Optional[Any] = True
a_ : List[Any] = 'temp_engine/bert-fp32.engine'
if args.fp16:
a_ : Any = 'temp_engine/bert-fp16.engine'
if args.int8:
a_ : int = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
a_ : List[Any] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
a_ : List[str] = [network.get_input(i) for i in range(network.num_inputs)]
a_ : int = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
config.max_workspace_size = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fp16:
config.set_flag(trt.BuilderFlag.FP16)
if args.int8:
config.set_flag(trt.BuilderFlag.INT8)
a_ : List[str] = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
a_ : List[str] = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , snake_case_ : List[Any] , snake_case_ : Optional[Any] , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : Optional[Any] , snake_case_ : List[str] , snake_case_ : Union[str, Any] ):
__magic_name__ = np.asarray(inputs['''input_ids'''] , dtype=np.int32 )
__magic_name__ = np.asarray(inputs['''attention_mask'''] , dtype=np.int32 )
__magic_name__ = np.asarray(inputs['''token_type_ids'''] , dtype=np.int32 )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , snake_case_ )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , snake_case_ )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , snake_case_ )
# start time
__magic_name__ = time.time()
# Run inference
context.execute_async(
bindings=[int(snake_case_ ) for d_inp in d_inputs] + [int(snake_case_ ), int(snake_case_ )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(snake_case_ , snake_case_ , snake_case_ )
cuda.memcpy_dtoh_async(snake_case_ , snake_case_ , snake_case_ )
# Synchronize the stream and take time
stream.synchronize()
# end time
__magic_name__ = time.time()
__magic_name__ = end_time - start_time
__magic_name__ = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
a_ : Tuple = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
a_ : Dict = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
a_ : Tuple = raw_datasets['validation'].column_names
a_ : Optional[int] = 'question' if 'question' in column_names else column_names[0]
a_ : List[Any] = 'context' if 'context' in column_names else column_names[1]
a_ : Optional[Any] = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
a_ : Optional[int] = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
a_ : Tuple = min(args.max_seq_length, tokenizer.model_max_length)
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple ):
# Some of the questions have lots of whitespace on the left, which is not useful and will make the
# truncation of the context fail (the tokenized question will take a lot of space). So we remove that
# left whitespace
__magic_name__ = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit with the context of the previous feature.
__magic_name__ = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='''only_second''' if pad_on_right else '''only_first''' , max_length=snake_case_ , stride=args.doc_stride , return_overflowing_tokens=snake_case_ , return_offsets_mapping=snake_case_ , padding='''max_length''' , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
__magic_name__ = tokenized_examples.pop('''overflow_to_sample_mapping''' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
__magic_name__ = []
for i in range(len(tokenized_examples['''input_ids'''] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
__magic_name__ = tokenized_examples.sequence_ids(snake_case_ )
__magic_name__ = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
__magic_name__ = sample_mapping[i]
tokenized_examples["example_id"].append(examples['''id'''][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
__magic_name__ = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['''offset_mapping'''][i] )
]
return tokenized_examples
a_ : Dict = raw_datasets['validation']
# Validation Feature Creation
a_ : int = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='Running tokenizer on validation dataset',
)
a_ : List[str] = default_data_collator
a_ : Tuple = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
a_ : Optional[int] = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : List[str] , snake_case_ : Dict="eval" ):
# Post-processing: we match the start logits and end logits to answers in the original context.
__magic_name__ = postprocess_qa_predictions(
examples=snake_case_ , features=snake_case_ , predictions=snake_case_ , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=snake_case_ , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
__magic_name__ = [
{'''id''': k, '''prediction_text''': v, '''no_answer_probability''': 0.0} for k, v in predictions.items()
]
else:
__magic_name__ = [{'''id''': k, '''prediction_text''': v} for k, v in predictions.items()]
__magic_name__ = [{'''id''': ex['''id'''], '''answers''': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=snake_case_ , label_ids=snake_case_ )
a_ : Any = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
return trt.volume(engine.get_binding_shape(snake_case_ ) ) * engine.get_binding_dtype(snake_case_ ).itemsize
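# Editor's illustration (assumes FP32 logits): with INPUT_SHAPE = (8, 384),
# each logits binding holds 8 * 384 float32 values, so the helper above
# returns 8 * 384 * 4 = 12288 bytes for that binding.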
# Allocate device memory for inputs and outputs.
a_ : Any = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
a_ : Tuple = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
a_ : Union[str, Any] = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
a_ : Union[str, Any] = cuda.mem_alloc(h_outputa.nbytes)
a_ : List[str] = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
a_ : Optional[int] = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(F""" Num examples = {len(eval_dataset)}""")
logger.info(F""" Batch size = {args.per_device_eval_batch_size}""")
a_ : Tuple = 0.0
a_ : List[Any] = 0
a_ : int = timeit.default_timer()
a_ : Dict = None
for step, batch in enumerate(eval_dataloader):
a_ , a_ : str = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
a_ , a_ : Union[str, Any] = outputs
a_ : Any = torch.tensor(start_logits)
a_ : Tuple = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
a_ : List[str] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
a_ : List[str] = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
a_ : str = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
a_ : Optional[Any] = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
a_ : Optional[Any] = nested_truncate(all_preds, len(eval_dataset))
a_ : Optional[Any] = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1000 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1000))
logger.info('Total Number of Inference = %d', niter)
a_ : List[Any] = post_processing_function(eval_examples, eval_dataset, all_preds)
a_ : List[Any] = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F"""Evaluation metrics: {eval_metric}""")
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
__magic_name__ = SwinConfig(image_size=192 )
if "base" in model_name:
__magic_name__ = 6
__magic_name__ = 128
__magic_name__ = (2, 2, 18, 2)
__magic_name__ = (4, 8, 16, 32)
elif "large" in model_name:
__magic_name__ = 12
__magic_name__ = 192
__magic_name__ = (2, 2, 18, 2)
__magic_name__ = (6, 12, 24, 48)
else:
raise ValueError('''Model not supported, only supports base and large variants''' )
__magic_name__ = window_size
__magic_name__ = embed_dim
__magic_name__ = depths
__magic_name__ = num_heads
return config
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
if "encoder.mask_token" in name:
__magic_name__ = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
if "encoder.patch_embed.proj" in name:
__magic_name__ = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "encoder.patch_embed.norm" in name:
__magic_name__ = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
if "attn.proj" in name:
__magic_name__ = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
__magic_name__ = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
__magic_name__ = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
__magic_name__ = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
__magic_name__ = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
__magic_name__ = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
__magic_name__ = '''layernorm.weight'''
if name == "encoder.norm.bias":
__magic_name__ = '''layernorm.bias'''
if "decoder" in name:
pass
else:
__magic_name__ = '''swin.''' + name
return name
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Any ):
for key in orig_state_dict.copy().keys():
__magic_name__ = orig_state_dict.pop(snake_case_ )
if "attn_mask" in key:
pass
elif "qkv" in key:
__magic_name__ = key.split('''.''' )
__magic_name__ = int(key_split[2] )
__magic_name__ = int(key_split[4] )
__magic_name__ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__magic_name__ = val[:dim, :]
__magic_name__ = val[
dim : dim * 2, :
]
__magic_name__ = val[-dim:, :]
else:
__magic_name__ = val[
:dim
]
__magic_name__ = val[
dim : dim * 2
]
__magic_name__ = val[
-dim:
]
else:
__magic_name__ = val
return orig_state_dict
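# Editor's illustration (assumed shapes): a fused "qkv" weight of shape
# (3 * dim, hidden) is split row-wise by the loop above into query
# (val[:dim, :]), key (val[dim : dim * 2, :]) and value (val[-dim:, :]);
# the corresponding biases of length 3 * dim are sliced the same way.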
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : int , snake_case_ : Any , snake_case_ : str ):
__magic_name__ = torch.load(snake_case_ , map_location='''cpu''' )['''model''']
__magic_name__ = get_swin_config(snake_case_ )
__magic_name__ = SwinForMaskedImageModeling(snake_case_ )
model.eval()
__magic_name__ = convert_state_dict(snake_case_ , snake_case_ )
model.load_state_dict(snake_case_ )
__magic_name__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__magic_name__ = ViTImageProcessor(size={'''height''': 192, '''width''': 192} )
__magic_name__ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw )
__magic_name__ = image_processor(images=snake_case_ , return_tensors='''pt''' )
with torch.no_grad():
__magic_name__ = model(**snake_case_ ).logits
print(outputs.keys() )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case_ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(snake_case_ )
if push_to_hub:
print(f'Pushing model and image processor for {model_name} to hub' )
model.push_to_hub(f'microsoft/{model_name}' )
image_processor.push_to_hub(f'microsoft/{model_name}' )
if __name__ == "__main__":
a_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
a_ : Optional[Any] = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
a_ : int = logging.getLogger(__name__)
def simple_accuracy(preds , labels ):
    return (preds == labels).mean()
@dataclass
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    _a = field(
        default=None , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    _a = field(
        default=None , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    _a = field(
        default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
_a = field(metadata={"""help""": """Should contain the data files for the task."""} )
_a = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
    _a = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def _SCREAMING_SNAKE_CASE ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__magic_name__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__magic_name__ , __magic_name__ , __magic_name__ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , snake_case_ )
# Set seed
set_seed(training_args.seed )
try:
__magic_name__ = processors[data_args.task_name]()
__magic_name__ = processor.get_labels()
__magic_name__ = len(snake_case_ )
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__magic_name__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=snake_case_ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
__magic_name__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__magic_name__ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=snake_case_ , cache_dir=model_args.cache_dir , )
# Get datasets
__magic_name__ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=snake_case_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
__magic_name__ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=snake_case_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}
# Data collator
    __magic_name__ = DataCollatorWithPadding(snake_case_ , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
__magic_name__ = Trainer(
model=snake_case_ , args=snake_case_ , train_dataset=snake_case_ , eval_dataset=snake_case_ , compute_metrics=snake_case_ , data_collator=snake_case_ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__magic_name__ = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__magic_name__ = trainer.evaluate()
__magic_name__ = os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_master():
with open(snake_case_ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , snake_case_ , snake_case_ )
writer.write('''%s = %s\n''' % (key, value) )
results.update(snake_case_ )
return results
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 678 |
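# Example invocation (illustrative only; the script name, task and paths below
# are assumptions, not values taken from this file):
#   python run_multiple_choice.py \
#     --task_name swag \
#     --model_name_or_path bert-base-uncased \
#     --data_dir ./data/swag \
#     --output_dir ./out \
#     --do_train --do_eval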
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
return "".join(sorted(snake_case_ ) )
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
return word_by_signature[signature(snake_case_ )]
a_ : str = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
a_ : Optional[Any] = sorted({word.strip().lower() for word in data.splitlines()})
a_ : List[Any] = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
a_ : Optional[Any] = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams)) | 678 | 1 |
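# Worked example (sketch): "post", "spot", "stop" and "tops" all normalize to
# the signature "opst", so they land in the same `word_by_signature` bucket and
# each appears in `all_anagrams` (assuming all four words are present in
# words.txt).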
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ : Any = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[str] = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Union[str, Any] = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Union[str, Any] = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Dict = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[Any] = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
a_ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 678 |
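# Note: `_LazyModule` above defers the heavy framework imports until an
# attribute is first accessed. A minimal sketch of the same idea (simplified;
# not the real implementation):
import importlib


def _lazy_attr_sketch(module_name: str, attr_name: str):
    # Import the target module only at access time, then fetch the attribute.
    return getattr(importlib.import_module(module_name), attr_name)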
from __future__ import annotations
from scipy.special import comb # type: ignore
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , A ) -> Tuple:
'''simple docstring'''
__magic_name__ = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
__magic_name__ = len(A ) - 1
def __A ( self , A ) -> list[float]:
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__magic_name__ = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
            output_values.append(
                comb(self.degree , i ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(A ) , 5 ) == 1
return output_values
def __A ( self , A ) -> tuple[float, float]:
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__magic_name__ = self.basis_function(A )
__magic_name__ = 0.0
__magic_name__ = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def __A ( self , A = 0.01 ) -> Tuple:
'''simple docstring'''
from matplotlib import pyplot as plt # type: ignore
__magic_name__ = [] # x coordinates of points to plot
__magic_name__ = [] # y coordinates of points to plot
__magic_name__ = 0.0
while t <= 1:
__magic_name__ = self.bezier_curve_function(A )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
__magic_name__ = [i[0] for i in self.list_of_points]
__magic_name__ = [i[1] for i in self.list_of_points]
plt.plot(
A , A , color='''blue''' , label='''Curve of Degree ''' + str(self.degree ) , )
plt.scatter(A , A , color='''red''' , label='''Control Points''' )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3 | 678 | 1 |
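# Worked check: for the degree-1 curve through (1, 2) and (3, 5), the basis at
# t = 0.5 is [0.5, 0.5], so the curve point is
# (0.5 * 1 + 0.5 * 3, 0.5 * 2 + 0.5 * 5) = (2.0, 3.5).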
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a_ : Union[str, Any] = logging.get_logger(__name__)
a_ : int = {
'microsoft/conditional-detr-resnet-50': (
'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
),
}
class SCREAMING_SNAKE_CASE_ ( PretrainedConfig ):
"""simple docstring"""
_a = """conditional_detr"""
_a = ["""past_key_values"""]
_a = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , A=True , A=None , A=3 , A=3_00 , A=6 , A=20_48 , A=8 , A=6 , A=20_48 , A=8 , A=0.0 , A=0.0 , A=True , A="relu" , A=2_56 , A=0.1 , A=0.0 , A=0.0 , A=0.02 , A=1.0 , A=False , A="sine" , A="resnet50" , A=True , A=False , A=2 , A=5 , A=2 , A=1 , A=1 , A=2 , A=5 , A=2 , A=0.25 , **A , ) -> List[Any]:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__magic_name__ = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(A , A ):
__magic_name__ = backbone_config.get('''model_type''' )
__magic_name__ = CONFIG_MAPPING[backbone_model_type]
__magic_name__ = config_class.from_dict(A )
__magic_name__ = use_timm_backbone
__magic_name__ = backbone_config
__magic_name__ = num_channels
__magic_name__ = num_queries
__magic_name__ = d_model
__magic_name__ = encoder_ffn_dim
__magic_name__ = encoder_layers
__magic_name__ = encoder_attention_heads
__magic_name__ = decoder_ffn_dim
__magic_name__ = decoder_layers
__magic_name__ = decoder_attention_heads
__magic_name__ = dropout
__magic_name__ = attention_dropout
__magic_name__ = activation_dropout
__magic_name__ = activation_function
__magic_name__ = init_std
__magic_name__ = init_xavier_std
__magic_name__ = encoder_layerdrop
__magic_name__ = decoder_layerdrop
__magic_name__ = encoder_layers
__magic_name__ = auxiliary_loss
__magic_name__ = position_embedding_type
__magic_name__ = backbone
__magic_name__ = use_pretrained_backbone
__magic_name__ = dilation
# Hungarian matcher
__magic_name__ = class_cost
__magic_name__ = bbox_cost
__magic_name__ = giou_cost
# Loss coefficients
__magic_name__ = mask_loss_coefficient
__magic_name__ = dice_loss_coefficient
__magic_name__ = cls_loss_coefficient
__magic_name__ = bbox_loss_coefficient
__magic_name__ = giou_loss_coefficient
__magic_name__ = focal_alpha
super().__init__(is_encoder_decoder=A , **A )
@property
def __A ( self ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def __A ( self ) -> int:
'''simple docstring'''
return self.d_model
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
__magic_name__ = self.backbone_config.to_dict()
__magic_name__ = self.__class__.model_type
return output
class SCREAMING_SNAKE_CASE_ ( OnnxConfig ):
"""simple docstring"""
_a = version.parse("""1.11""" )
@property
def __A ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def __A ( self ) -> float:
'''simple docstring'''
return 1E-5
@property
def __A ( self ) -> int:
'''simple docstring'''
return 12 | 678 |
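# Note: the `attribute_map` in the config class above aliases `hidden_size` to
# `d_model` and `num_attention_heads` to `encoder_attention_heads`, so reading
# `config.hidden_size` returns the `d_model` value (256 by default).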
import re
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
__magic_name__ = re.compile(
r'''^(?:0|94|\+94|0{2}94)''' r'''7(0|1|2|4|5|6|7|8)''' r'''(-| |)''' r'''\d{7}$''' )
return bool(re.search(snake_case_ , snake_case_ ) )
if __name__ == "__main__":
a_ : Optional[int] = '0094702343221'
print(is_sri_lankan_phone_number(phone)) | 678 | 1 |
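# Additional spot checks (illustrative):
#   is_sri_lankan_phone_number('+94773283048') -> True (mobile prefix 77)
#   is_sri_lankan_phone_number('0312345678') -> False (the pattern only accepts
#   the mobile prefixes 70-72 and 74-78, not landline area codes)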
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Tuple , snake_case_ : str ):
__magic_name__ = 1.5
__magic_name__ = int(factor * num_class_images )
__magic_name__ = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=snake_case_ , aesthetic_weight=0.1 )
os.makedirs(f'{class_data_dir}/images' , exist_ok=snake_case_ )
if len(list(Path(f'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images:
return
while True:
__magic_name__ = client.query(text=snake_case_ )
if len(snake_case_ ) >= factor * num_class_images or num_images > 1E4:
break
else:
__magic_name__ = int(factor * num_images )
__magic_name__ = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=snake_case_ , aesthetic_weight=0.1 , )
__magic_name__ = 0
__magic_name__ = 0
__magic_name__ = tqdm(desc='''downloading real regularization images''' , total=snake_case_ )
with open(f'{class_data_dir}/caption.txt' , '''w''' ) as fa, open(f'{class_data_dir}/urls.txt' , '''w''' ) as fa, open(
f'{class_data_dir}/images.txt' , '''w''' ) as fa:
while total < num_class_images:
__magic_name__ = class_images[count]
count += 1
try:
__magic_name__ = requests.get(images['''url'''] )
if img.status_code == 200:
__magic_name__ = Image.open(BytesIO(img.content ) )
with open(f'{class_data_dir}/images/{total}.jpg' , '''wb''' ) as f:
f.write(img.content )
fa.write(images['''caption'''] + '''\n''' )
fa.write(images['''url'''] + '''\n''' )
fa.write(f'{class_data_dir}/images/{total}.jpg' + '''\n''' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = argparse.ArgumentParser('''''' , add_help=snake_case_ )
parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=snake_case_ , type=snake_case_ )
parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=snake_case_ , type=snake_case_ )
parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=200 , type=snake_case_ )
return parser.parse_args()
if __name__ == "__main__":
a_ : Union[str, Any] = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images) | 678 |
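# Example invocation (illustrative; the script name, prompt and paths are
# placeholders):
#   python retrieve_real_images.py \
#     --class_prompt "photo of a dog" \
#     --class_data_dir ./class_data/dog \
#     --num_class_images 200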
import os
import sys
import unittest
a_ : int = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
a_ : Optional[Any] = os.path.join(git_repo_path, 'src', 'diffusers')
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = find_backend(''' if not is_torch_available():''' )
self.assertEqual(A , '''torch''' )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
__magic_name__ = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' )
self.assertEqual(A , '''torch_and_transformers''' )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
__magic_name__ = find_backend(
''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' )
self.assertEqual(A , '''torch_and_transformers_and_onnx''' )
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , A )
self.assertIn('''torch_and_transformers''' , A )
self.assertIn('''flax_and_transformers''' , A )
self.assertIn('''torch_and_transformers_and_onnx''' , A )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''UNet2DModel''' , objects['''torch'''] )
self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] )
self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] )
self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] )
self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] )
self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(A , '''\nCONSTANT = None\n''' )
__magic_name__ = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
A , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
__magic_name__ = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
'''
__magic_name__ = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(A , A )
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
'''
__magic_name__ = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , A ) | 678 | 1 |
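# Illustration (sketch): `find_backend` maps an availability guard to a backend
# key, so a line such as
#   " if not (is_torch_available() and is_scipy_available()):"
# would resolve to "torch_and_scipy" under the convention exercised above.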
import warnings
from .generation import TFGenerationMixin
class SCREAMING_SNAKE_CASE_ ( TFGenerationMixin ):
    """simple docstring"""

    # The class body runs at import time, so this warns as soon as the legacy
    # module is imported.
    warnings.warn(
        """Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will """
        """be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.""" , FutureWarning , )
def _SCREAMING_SNAKE_CASE ( snake_case_ : list[list[int]] , snake_case_ : int , snake_case_ : int , snake_case_ : set ):
__magic_name__ , __magic_name__ = len(snake_case_ ), len(grid[0] )
if (
min(snake_case_ , snake_case_ ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
__magic_name__ = 0
count += depth_first_search(snake_case_ , row + 1 , snake_case_ , snake_case_ )
count += depth_first_search(snake_case_ , row - 1 , snake_case_ , snake_case_ )
count += depth_first_search(snake_case_ , snake_case_ , col + 1 , snake_case_ )
count += depth_first_search(snake_case_ , snake_case_ , col - 1 , snake_case_ )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 678 | 1 |
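# Worked example (sketch; assumes the search function above is exposed as
# `depth_first_search`, the name used in its own recursive calls):
def _demo_count_paths():
    # A 2x2 grid of open cells has exactly two simple paths from the top-left
    # corner to the bottom-right corner.
    grid = [[0, 0], [0, 0]]
    return depth_first_search(grid, 0, 0, set())  # expected: 2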
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def _SCREAMING_SNAKE_CASE ( snake_case_ : bytes , snake_case_ : int ):
__magic_name__ = f'{sampling_rate}'
__magic_name__ = '''1'''
__magic_name__ = '''f32le'''
__magic_name__ = [
'''ffmpeg''',
'''-i''',
'''pipe:0''',
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
try:
with subprocess.Popen(snake_case_ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
__magic_name__ = ffmpeg_process.communicate(snake_case_ )
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error
__magic_name__ = output_stream[0]
    __magic_name__ = np.frombuffer(snake_case_ , np.float32 )
if audio.shape[0] == 0:
raise ValueError('''Malformed soundfile''' )
return audio
def _SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : float , snake_case_ : str = "f32le" , ):
__magic_name__ = f'{sampling_rate}'
__magic_name__ = '''1'''
if format_for_conversion == "s16le":
__magic_name__ = 2
elif format_for_conversion == "f32le":
__magic_name__ = 4
else:
raise ValueError(f'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
__magic_name__ = platform.system()
if system == "Linux":
__magic_name__ = '''alsa'''
__magic_name__ = '''default'''
elif system == "Darwin":
__magic_name__ = '''avfoundation'''
__magic_name__ = ''':0'''
elif system == "Windows":
__magic_name__ = '''dshow'''
__magic_name__ = '''default'''
__magic_name__ = [
'''ffmpeg''',
'''-f''',
format_,
'''-i''',
input_,
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-fflags''',
'''nobuffer''',
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
__magic_name__ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
__magic_name__ = _ffmpeg_stream(snake_case_ , snake_case_ )
for item in iterator:
yield item
def _SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : float , snake_case_ : Optional[int] = None , snake_case_ : Optional[Union[Tuple[float, float], float]] = None , snake_case_ : str = "f32le" , ):
if stream_chunk_s is not None:
__magic_name__ = stream_chunk_s
else:
__magic_name__ = chunk_length_s
__magic_name__ = ffmpeg_microphone(snake_case_ , snake_case_ , format_for_conversion=snake_case_ )
if format_for_conversion == "s16le":
        __magic_name__ = np.int16
__magic_name__ = 2
elif format_for_conversion == "f32le":
        __magic_name__ = np.float32
__magic_name__ = 4
else:
raise ValueError(f'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
if stride_length_s is None:
__magic_name__ = chunk_length_s / 6
__magic_name__ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(snake_case_ , (int, float) ):
__magic_name__ = [stride_length_s, stride_length_s]
__magic_name__ = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
__magic_name__ = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
__magic_name__ = datetime.datetime.now()
__magic_name__ = datetime.timedelta(seconds=snake_case_ )
for item in chunk_bytes_iter(snake_case_ , snake_case_ , stride=(stride_left, stride_right) , stream=snake_case_ ):
# Put everything back in numpy scale
__magic_name__ = np.frombuffer(item['''raw'''] , dtype=snake_case_ )
__magic_name__ = (
item['''stride'''][0] // size_of_sample,
item['''stride'''][1] // size_of_sample,
)
__magic_name__ = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : int , snake_case_ : Tuple[int, int] , snake_case_ : bool = False ):
__magic_name__ = B''''''
__magic_name__ , __magic_name__ = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f'Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}' )
__magic_name__ = 0
for raw in iterator:
acc += raw
if stream and len(snake_case_ ) < chunk_len:
__magic_name__ = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(snake_case_ ) >= chunk_len:
# We are flushing the accumulator
__magic_name__ = (_stride_left, stride_right)
__magic_name__ = {'''raw''': acc[:chunk_len], '''stride''': stride}
if stream:
__magic_name__ = False
yield item
__magic_name__ = stride_left
__magic_name__ = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(snake_case_ ) > stride_left:
__magic_name__ = {'''raw''': acc, '''stride''': (_stride_left, 0)}
if stream:
__magic_name__ = False
yield item
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : int ):
    __magic_name__ = 2**24 # 16MB
try:
with subprocess.Popen(snake_case_ , stdout=subprocess.PIPE , bufsize=snake_case_ ) as ffmpeg_process:
while True:
__magic_name__ = ffmpeg_process.stdout.read(snake_case_ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error | 678 |
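# Usage sketch (assumes a working `ffmpeg` binary on PATH, a default microphone,
# and that the live-streaming helper above is exposed as
# `ffmpeg_microphone_live`; parameters are illustrative):
def _demo_microphone_chunks(sampling_rate: int = 16000):
    # Yield 5-second raw audio chunks with 1-second strides on both sides.
    for item in ffmpeg_microphone_live(sampling_rate, chunk_length_s=5.0, stride_length_s=1.0):
        yield item["raw"], item["stride"]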
a_ : Dict = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
a_ : str = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 12,
'Pm': 15,
'Em': 18,
'Zm': 21,
'Ym': 24,
}
def _SCREAMING_SNAKE_CASE ( snake_case_ : float , snake_case_ : str , snake_case_ : str ):
__magic_name__ = from_type.lower().strip('''s''' )
__magic_name__ = to_type.lower().strip('''s''' )
__magic_name__ = UNIT_SYMBOL.get(snake_case_ , snake_case_ )
__magic_name__ = UNIT_SYMBOL.get(snake_case_ , snake_case_ )
if from_sanitized not in METRIC_CONVERSION:
        __magic_name__ = (
            f'Invalid \'from_type\' value: {from_type!r}.\n'
            f'Conversion abbreviations are: {", ".join(METRIC_CONVERSION )}'
        )
raise ValueError(snake_case_ )
if to_sanitized not in METRIC_CONVERSION:
        __magic_name__ = (
            f'Invalid \'to_type\' value: {to_type!r}.\n'
            f'Conversion abbreviations are: {", ".join(METRIC_CONVERSION )}'
        )
raise ValueError(snake_case_ )
__magic_name__ = METRIC_CONVERSION[from_sanitized]
__magic_name__ = METRIC_CONVERSION[to_sanitized]
__magic_name__ = 1
if from_exponent > to_exponent:
__magic_name__ = from_exponent - to_exponent
else:
__magic_name__ = -(to_exponent - from_exponent)
return value * pow(10 , snake_case_ )
if __name__ == "__main__":
from doctest import testmod
testmod() | 678 | 1 |
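# Worked check (assuming the converter above is exposed as `length_conversion`):
#   length_conversion(4, "kilometer", "meter") applies 10**(3 - 0) -> 4000.0
#   length_conversion(1, "meter", "kilometer") applies 10**(0 - 3) -> 0.001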
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : List[str] , snake_case_ : List[str] , snake_case_ : Optional[int] , snake_case_ : int , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : List[str] , snake_case_ : List[str] , snake_case_ : Tuple , snake_case_ : int , snake_case_ : Any , ):
__magic_name__ = {
'''7z''': (seven_zip_file, SevenZipExtractor),
'''bz2''': (bza_file, BzipaExtractor),
'''gzip''': (gz_file, GzipExtractor),
'''lz4''': (lza_file, LzaExtractor),
'''tar''': (tar_file, TarExtractor),
'''xz''': (xz_file, XzExtractor),
'''zip''': (zip_file, ZipExtractor),
'''zstd''': (zstd_file, ZstdExtractor),
}
__magic_name__ , __magic_name__ = input_paths_and_base_extractors[compression_format]
if input_path is None:
__magic_name__ = f'for \'{compression_format}\' compression_format, '
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(snake_case_ )
assert base_extractor.is_extractable(snake_case_ )
__magic_name__ = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
base_extractor.extract(snake_case_ , snake_case_ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
__magic_name__ = file_path.read_text(encoding='''utf-8''' )
else:
__magic_name__ = output_path.read_text(encoding='''utf-8''' )
__magic_name__ = text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : int , snake_case_ : List[Any] , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : str , snake_case_ : str , snake_case_ : str , snake_case_ : str , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] , ):
__magic_name__ = {
'''7z''': seven_zip_file,
'''bz2''': bza_file,
'''gzip''': gz_file,
'''lz4''': lza_file,
'''tar''': tar_file,
'''xz''': xz_file,
'''zip''': zip_file,
'''zstd''': zstd_file,
}
__magic_name__ = input_paths[compression_format]
if input_path is None:
__magic_name__ = f'for \'{compression_format}\' compression_format, '
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(snake_case_ )
__magic_name__ = Extractor.infer_extractor_format(snake_case_ )
assert extractor_format is not None
__magic_name__ = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
Extractor.extract(snake_case_ , snake_case_ , snake_case_ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
__magic_name__ = file_path.read_text(encoding='''utf-8''' )
else:
__magic_name__ = output_path.read_text(encoding='''utf-8''' )
__magic_name__ = text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.fixture
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : List[Any] ):
import tarfile
__magic_name__ = tmp_path / '''data_dot_dot'''
directory.mkdir()
__magic_name__ = directory / '''tar_file_with_dot_dot.tar'''
with tarfile.TarFile(snake_case_ , '''w''' ) as f:
f.add(snake_case_ , arcname=os.path.join('''..''' , text_file.name ) )
return path
@pytest.fixture
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
import tarfile
__magic_name__ = tmp_path / '''data_sym_link'''
directory.mkdir()
__magic_name__ = directory / '''tar_file_with_sym_link.tar'''
    os.symlink('''..''' , directory / '''subdir''' , target_is_directory=True )
with tarfile.TarFile(snake_case_ , '''w''' ) as f:
f.add(str(directory / '''subdir''' ) , arcname='''subdir''' ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
'''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , )
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : int ):
__magic_name__ = {
'''tar_file_with_dot_dot''': tar_file_with_dot_dot,
'''tar_file_with_sym_link''': tar_file_with_sym_link,
}
__magic_name__ = insecure_tar_files[insecure_tar_file]
__magic_name__ = tmp_path / '''extracted'''
TarExtractor.extract(snake_case_ , snake_case_ )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] ):
# We should have less false positives than zipfile.is_zipfile
# We do that by checking only the magic number
__magic_name__ = tmpdir / '''not_a_zip_file'''
# From: https://github.com/python/cpython/pull/5053
__magic_name__ = (
B'''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'''
B'''\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'''
B'''DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'''
B'''\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'''
)
with not_a_zip_file.open('''wb''' ) as f:
f.write(snake_case_ )
assert zipfile.is_zipfile(str(snake_case_ ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(snake_case_ ) # but we're right | 678 |
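# Note on the last test: `zipfile.is_zipfile` only looks for an end-of-central-
# directory record ("PK\x05\x06"), which the crafted PNG payload above happens
# to contain, while the extractor checks the leading local-file-header magic
# ("PK\x03\x04"), so only the latter correctly rejects the file.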
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
a_ : Union[str, Any] = {
'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : int = [
'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongT5EncoderModel',
'LongT5ForConditionalGeneration',
'LongT5Model',
'LongT5PreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Dict = [
'FlaxLongT5ForConditionalGeneration',
'FlaxLongT5Model',
'FlaxLongT5PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
a_ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 678 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a_ : Tuple = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : Optional[Any]=False , snake_case_ : str=False , snake_case_ : List[Any]=False ):
__magic_name__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'transformer.blocks.{i}.norm1.weight', f'vilt.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'transformer.blocks.{i}.norm1.bias', f'vilt.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'transformer.blocks.{i}.attn.proj.weight', f'vilt.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'transformer.blocks.{i}.attn.proj.bias', f'vilt.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'transformer.blocks.{i}.norm2.weight', f'vilt.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'transformer.blocks.{i}.norm2.bias', f'vilt.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(f'transformer.blocks.{i}.mlp.fc1.weight', f'vilt.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'transformer.blocks.{i}.mlp.fc1.bias', f'vilt.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'transformer.blocks.{i}.mlp.fc2.weight', f'vilt.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'transformer.blocks.{i}.mlp.fc2.bias', f'vilt.encoder.layer.{i}.output.dense.bias') )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : List[Any] ):
for i in range(config.num_hidden_layers ):
__magic_name__ = '''vilt.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__magic_name__ = state_dict.pop(f'transformer.blocks.{i}.attn.qkv.weight' )
__magic_name__ = state_dict.pop(f'transformer.blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__magic_name__ = in_proj_weight[
: config.hidden_size, :
]
__magic_name__ = in_proj_bias[: config.hidden_size]
__magic_name__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__magic_name__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__magic_name__ = in_proj_weight[
-config.hidden_size :, :
]
__magic_name__ = in_proj_bias[-config.hidden_size :]
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
__magic_name__ = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(snake_case_ , snake_case_ )
def _SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : Any , snake_case_ : str ):
__magic_name__ = dct.pop(snake_case_ )
__magic_name__ = val
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : int ):
    __magic_name__ = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=False )
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
if "vqa" in checkpoint_url:
__magic_name__ = True
__magic_name__ = 3129
__magic_name__ = '''huggingface/label-files'''
__magic_name__ = '''vqa2-id2label.json'''
__magic_name__ = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type='''dataset''' ) , '''r''' ) )
        __magic_name__ = {int(k ): v for k, v in idalabel.items()}
__magic_name__ = idalabel
__magic_name__ = {v: k for k, v in idalabel.items()}
__magic_name__ = ViltForQuestionAnswering(snake_case_ )
elif "nlvr" in checkpoint_url:
__magic_name__ = True
__magic_name__ = 2
__magic_name__ = {0: '''False''', 1: '''True'''}
__magic_name__ = {v: k for k, v in config.idalabel.items()}
__magic_name__ = 3
__magic_name__ = ViltForImagesAndTextClassification(snake_case_ )
elif "irtr" in checkpoint_url:
__magic_name__ = True
__magic_name__ = ViltForImageAndTextRetrieval(snake_case_ )
elif "mlm_itm" in checkpoint_url:
__magic_name__ = True
__magic_name__ = ViltForMaskedLM(snake_case_ )
else:
raise ValueError('''Unknown model type''' )
# load state_dict of original model, remove and rename some keys
__magic_name__ = torch.hub.load_state_dict_from_url(snake_case_ , map_location='''cpu''' )['''state_dict''']
__magic_name__ = create_rename_keys(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
for src, dest in rename_keys:
rename_key(snake_case_ , snake_case_ , snake_case_ )
read_in_q_k_v(snake_case_ , snake_case_ )
if mlm_model or irtr_model:
__magic_name__ = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
for k in ignore_keys:
state_dict.pop(snake_case_ , snake_case_ )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
        __magic_name__ , __magic_name__ = model.load_state_dict(snake_case_ , strict=False )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(snake_case_ )
# Define processor
__magic_name__ = ViltImageProcessor(size=384 )
__magic_name__ = BertTokenizer.from_pretrained('''bert-base-uncased''' )
__magic_name__ = ViltProcessor(snake_case_ , snake_case_ )
# Forward pass on example inputs (image + text)
if nlvr_model:
__magic_name__ = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=snake_case_ ).raw )
__magic_name__ = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=snake_case_ ).raw )
__magic_name__ = (
'''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
''' standing.'''
)
__magic_name__ = processor(snake_case_ , snake_case_ , return_tensors='''pt''' )
__magic_name__ = processor(snake_case_ , snake_case_ , return_tensors='''pt''' )
__magic_name__ = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
__magic_name__ = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=snake_case_ ).raw )
if mlm_model:
__magic_name__ = '''a bunch of [MASK] laying on a [MASK].'''
else:
__magic_name__ = '''How many cats are there?'''
__magic_name__ = processor(snake_case_ , snake_case_ , return_tensors='''pt''' )
__magic_name__ = model(**snake_case_ )
# Verify outputs
if mlm_model:
__magic_name__ = torch.Size([1, 11, 3_0522] )
__magic_name__ = torch.tensor([-12.5061, -12.5123, -12.5174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , snake_case_ , atol=1E-4 )
# verify masked token prediction equals "cats"
__magic_name__ = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
        __magic_name__ = torch.Size([1, 3129] )
        __magic_name__ = torch.tensor([-15.9495, -18.1472, -10.3041] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1E-4 )
# verify vqa prediction equals "2"
__magic_name__ = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
__magic_name__ = torch.Size([1, 2] )
__magic_name__ = torch.tensor([-2.8721, 2.1291] )
assert torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1E-4 )
assert outputs.logits.shape == expected_shape
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
print(f'Saving model and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case_ )
processor.save_pretrained(snake_case_ )
if __name__ == "__main__":
a_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
a_ : Optional[Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 678 |
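# --- Added usage sketch (not part of the original conversion script) ---
# Reloading a converted VQA checkpoint; the folder path is a placeholder and
# should point at the directory written by `save_pretrained` above.
def _demo_load_converted_vilt(dump_folder: str = "./vilt-converted"):
    from transformers import ViltForQuestionAnswering, ViltProcessor

    processor = ViltProcessor.from_pretrained(dump_folder)
    model = ViltForQuestionAnswering.from_pretrained(dump_folder)
    return processor, model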
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=5_12 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> str:
'''simple docstring'''
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = seq_length
__magic_name__ = is_training
__magic_name__ = use_token_type_ids
__magic_name__ = use_labels
__magic_name__ = vocab_size
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = type_vocab_size
__magic_name__ = type_sequence_label_size
__magic_name__ = initializer_range
__magic_name__ = num_labels
__magic_name__ = num_choices
__magic_name__ = scope
__magic_name__ = self.vocab_size - 1
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ = None
if self.use_token_type_ids:
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
if self.use_labels:
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
__magic_name__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __A ( self , A , A , A , A , *A ) -> Tuple:
'''simple docstring'''
__magic_name__ = OpenAIGPTModel(config=A )
model.to(A )
model.eval()
__magic_name__ = model(A , token_type_ids=A , head_mask=A )
__magic_name__ = model(A , token_type_ids=A )
__magic_name__ = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , A , A , A , A , *A ) -> Dict:
'''simple docstring'''
__magic_name__ = OpenAIGPTLMHeadModel(A )
model.to(A )
model.eval()
__magic_name__ = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A , A , A , A , *A ) -> List[Any]:
'''simple docstring'''
__magic_name__ = OpenAIGPTDoubleHeadsModel(A )
model.to(A )
model.eval()
__magic_name__ = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A , A , A , A , *A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.num_labels
__magic_name__ = OpenAIGPTForSequenceClassification(A )
model.to(A )
model.eval()
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ) -> Dict:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''head_mask''': head_mask,
        }
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
_a = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
_a = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
_a = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def __A ( self , A , A , A , A , A ) -> List[str]:
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def __A ( self , A , A , A=False ) -> List[str]:
'''simple docstring'''
__magic_name__ = super()._prepare_for_class(A , A , return_labels=A )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
__magic_name__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=A , )
__magic_name__ = inputs_dict['''labels''']
__magic_name__ = inputs_dict['''labels''']
__magic_name__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=A , )
__magic_name__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A )
return inputs_dict
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = OpenAIGPTModelTester(self )
__magic_name__ = ConfigTester(self , config_class=A , n_embd=37 )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*A )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*A )
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*A )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*A )
@slow
def __A ( self ) -> List[str]:
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ = OpenAIGPTModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(A )
__magic_name__ = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=A ) # the president is
__magic_name__ = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
__magic_name__ = model.generate(A , do_sample=A )
self.assertListEqual(output_ids[0].tolist() , A ) | 678 | 1 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
return "".join(sorted(snake_case_ ) )
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
return word_by_signature[signature(snake_case_ )]
a_ : str = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
a_ : Optional[Any] = sorted({word.strip().lower() for word in data.splitlines()})
a_ : List[Any] = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
a_ : Optional[Any] = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams)) | 678 |
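# Usage sketch (assumes words.txt holds a standard English word list, so the exact
# output depends on that file): signature('east') == 'aest', and anagram('east')
# would return every word sharing that signature, e.g.
# ['east', 'eats', 'etas', 'sate', 'seat', 'teas'].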
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = []
__magic_name__ = 1
while len(snake_case_ ) < 1E6:
constant.append(str(snake_case_ ) )
i += 1
__magic_name__ = ''''''.join(snake_case_ )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
print(solution()) | 678 | 1 |
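# Sanity note: this computes Project Euler problem 40 (Champernowne's constant).
# The digits d(1), d(10), ..., d(1000000) are 1, 1, 5, 3, 7, 2, 1, so the widely
# cited answer for the product is 210.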
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
a_ : List[str] = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple ):
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : Tuple ):
return max(metric_fn(snake_case_ , snake_case_ ) for gt in ground_truths )
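# The helper above scores a prediction against every reference and keeps the best
# value; e.g. with exact_match_score it returns 1 as soon as any ground truth
# matches the prediction, and with fa_score it keeps the highest token-overlap F1.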
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : Tuple , snake_case_ : str ):
__magic_name__ = [line.strip() for line in open(snake_case_ , '''r''' ).readlines()]
__magic_name__ = []
if args.gold_data_mode == "qa":
__magic_name__ = pd.read_csv(snake_case_ , sep='''\t''' , header=snake_case_ )
for answer_list in data[1]:
__magic_name__ = ast.literal_eval(snake_case_ )
answers.append(snake_case_ )
else:
__magic_name__ = [line.strip() for line in open(snake_case_ , '''r''' ).readlines()]
__magic_name__ = [[reference] for reference in references]
__magic_name__ = __magic_name__ = __magic_name__ = 0
for prediction, ground_truths in zip(snake_case_ , snake_case_ ):
total += 1
em += metric_max_over_ground_truths(snake_case_ , snake_case_ , snake_case_ )
fa += metric_max_over_ground_truths(snake_case_ , snake_case_ , snake_case_ )
__magic_name__ = 100.0 * em / total
__magic_name__ = 100.0 * fa / total
logger.info(f'F1: {fa:.2f}' )
logger.info(f'EM: {em:.2f}' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : int , snake_case_ : Optional[int] ):
__magic_name__ = args.k
__magic_name__ = [line.strip() for line in open(snake_case_ , '''r''' ).readlines()]
__magic_name__ = [line.strip() for line in open(snake_case_ , '''r''' ).readlines()]
__magic_name__ = __magic_name__ = 0
for hypo, reference in zip(snake_case_ , snake_case_ ):
__magic_name__ = set(hypo.split('''\t''' )[:k] )
__magic_name__ = set(reference.split('''\t''' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
__magic_name__ = 100.0 * em / total
logger.info(f'Precision@{k}: {em: .2f}' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Dict , snake_case_ : List[str] ):
def strip_title(snake_case_ : List[str] ):
if title.startswith('''"''' ):
__magic_name__ = title[1:]
if title.endswith('''"''' ):
__magic_name__ = title[:-1]
return title
__magic_name__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
snake_case_ , return_tensors='''pt''' , padding=snake_case_ , truncation=snake_case_ , )['''input_ids'''].to(args.device )
__magic_name__ = rag_model.rag.question_encoder(snake_case_ )
__magic_name__ = question_enc_outputs[0]
__magic_name__ = rag_model.retriever(
snake_case_ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
__magic_name__ = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
__magic_name__ = []
for docs in all_docs:
__magic_name__ = [strip_title(snake_case_ ) for title in docs['''title''']]
provenance_strings.append('''\t'''.join(snake_case_ ) )
return provenance_strings
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : Dict ):
with torch.no_grad():
__magic_name__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
snake_case_ , return_tensors='''pt''' , padding=snake_case_ , truncation=snake_case_ )
__magic_name__ = inputs_dict.input_ids.to(args.device )
__magic_name__ = inputs_dict.attention_mask.to(args.device )
__magic_name__ = rag_model.generate( # rag_model overwrites generate
snake_case_ , attention_mask=snake_case_ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=snake_case_ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
__magic_name__ = rag_model.retriever.generator_tokenizer.batch_decode(snake_case_ , skip_special_tokens=snake_case_ )
if args.print_predictions:
for q, a in zip(snake_case_ , snake_case_ ):
logger.info('''Q: {} - A: {}'''.format(snake_case_ , snake_case_ ) )
return answers
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=snake_case_ , help=(
'''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=snake_case_ , choices=['''exact''', '''compressed''', '''legacy'''] , type=snake_case_ , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=snake_case_ , type=snake_case_ , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=snake_case_ , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=snake_case_ , type=snake_case_ , required=snake_case_ , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=snake_case_ , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=snake_case_ , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=snake_case_ , type=snake_case_ , required=snake_case_ , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=snake_case_ , type=snake_case_ , required=snake_case_ , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=snake_case_ , choices=['''qa''', '''ans'''] , help=(
            '''Format of the gold data file. '''
            '''qa - a single line in the following format: question [tab] answer_list. '''
            '''ans - a single line of the gold file contains the expected answer string'''
) , )
parser.add_argument(
'''--predictions_path''' , type=snake_case_ , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
'''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=snake_case_ , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=snake_case_ , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=snake_case_ , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=50 , type=snake_case_ , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
        '''--print_docs''' , action='''store_true''' , help='''If True, prints docs retrieved while generating.''' , )
__magic_name__ = parser.parse_args()
__magic_name__ = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
return args
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
__magic_name__ = {}
if args.model_type is None:
__magic_name__ = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('''rag''' ):
__magic_name__ = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
__magic_name__ = args.n_docs
if args.index_name is not None:
__magic_name__ = args.index_name
if args.index_path is not None:
__magic_name__ = args.index_path
else:
__magic_name__ = BartForConditionalGeneration
__magic_name__ = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , snake_case_ )
__magic_name__ = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
__magic_name__ = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
score_fn(snake_case_ , args.predictions_path , args.gold_data_path )
continue
logger.info('''***** Running evaluation for {} *****'''.format(snake_case_ ) )
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
if args.model_type.startswith('''rag''' ):
__magic_name__ = RagRetriever.from_pretrained(snake_case_ , **snake_case_ )
__magic_name__ = model_class.from_pretrained(snake_case_ , retriever=snake_case_ , **snake_case_ )
model.retriever.init_retrieval()
else:
__magic_name__ = model_class.from_pretrained(snake_case_ , **snake_case_ )
model.to(args.device )
with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
__magic_name__ = []
for line in tqdm(snake_case_ ):
questions.append(line.strip() )
if len(snake_case_ ) == args.eval_batch_size:
__magic_name__ = evaluate_batch_fn(snake_case_ , snake_case_ , snake_case_ )
preds_file.write('''\n'''.join(snake_case_ ) + '''\n''' )
preds_file.flush()
__magic_name__ = []
if len(snake_case_ ) > 0:
__magic_name__ = evaluate_batch_fn(snake_case_ , snake_case_ , snake_case_ )
preds_file.write('''\n'''.join(snake_case_ ) )
preds_file.flush()
score_fn(snake_case_ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
a_ : Tuple = get_args()
main(args) | 678 |
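# Example invocation (sketch; the script name and file paths are placeholders):
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --evaluation_set questions.txt --gold_data_path gold.tsv \
#       --predictions_path preds.txt --eval_mode e2e --gold_data_mode qa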
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
a_ : str = True
except ImportError:
a_ : Optional[int] = False
try:
from torch.hub import _get_torch_home
a_ : Optional[Any] = _get_torch_home()
except ImportError:
a_ : List[Any] = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
)
a_ : Any = os.path.join(torch_cache_home, 'transformers')
a_ : Any = 'https://cdn.huggingface.co'
a_ : Any = 'https://s3.amazonaws.com/models.huggingface.co/bert'
a_ : int = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
a_ : Any = os.path.join(PATH, 'config.yaml')
a_ : Any = os.path.join(PATH, 'attributes.txt')
a_ : Any = os.path.join(PATH, 'objects.txt')
a_ : List[Any] = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
a_ : Any = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
a_ : Optional[int] = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
a_ : int = 'pytorch_model.bin'
a_ : Union[str, Any] = 'config.yaml'
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any]=OBJECTS , snake_case_ : str=ATTRIBUTES ):
__magic_name__ = []
with open(snake_case_ ) as f:
for object in f.readlines():
vg_classes.append(object.split(''',''' )[0].lower().strip() )
__magic_name__ = []
with open(snake_case_ ) as f:
for object in f.readlines():
vg_attrs.append(object.split(''',''' )[0].lower().strip() )
return vg_classes, vg_attrs
def _SCREAMING_SNAKE_CASE ( snake_case_ : int ):
__magic_name__ = OrderedDict()
with open(snake_case_ , '''rb''' ) as f:
__magic_name__ = pkl.load(snake_case_ )['''model''']
for k in copy.deepcopy(list(ckp.keys() ) ):
__magic_name__ = ckp.pop(snake_case_ )
if isinstance(snake_case_ , np.ndarray ):
__magic_name__ = torch.tensor(snake_case_ )
else:
            assert isinstance(snake_case_ , torch.Tensor ), type(snake_case_ )
__magic_name__ = v
return r
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = {}
def __init__( self , A , A = "root" , A=0 ) -> List[str]:
'''simple docstring'''
__magic_name__ = name
__magic_name__ = level
__magic_name__ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__magic_name__ = copy.deepcopy(A )
__magic_name__ = copy.deepcopy(A )
if isinstance(A , A ):
__magic_name__ = Config(A , name=A , level=level + 1 )
__magic_name__ = v
setattr(self , A , A )
__magic_name__ = d
def __repr__( self ) -> Union[str, Any]:
'''simple docstring'''
return str(list((self._pointer.keys()) ) )
def __setattr__( self , A , A ) -> Tuple:
'''simple docstring'''
__magic_name__ = val
__magic_name__ = val
__magic_name__ = key.split('''.''' )
__magic_name__ = len(A ) - 1
__magic_name__ = self._pointer
if len(A ) > 1:
for i, l in enumerate(A ):
if hasattr(self , A ) and isinstance(getattr(self , A ) , A ):
setattr(getattr(self , A ) , '''.'''.join(levels[i:] ) , A )
if l == last_level:
__magic_name__ = val
else:
__magic_name__ = pointer[l]
def __A ( self ) -> List[Any]:
'''simple docstring'''
return self._pointer
def __A ( self , A , A ) -> Any:
'''simple docstring'''
with open(F'{file_name}' , '''w''' ) as stream:
dump(A , A )
def __A ( self , A , A ) -> List[Any]:
'''simple docstring'''
with open(F'{file_name}' , '''w''' ) as stream:
json.dump(A , A )
@staticmethod
def __A ( A ) -> Optional[Any]:
'''simple docstring'''
with open(A ) as stream:
__magic_name__ = load(A , Loader=A )
return data
def __str__( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = ''' '''
if self._name != "root":
__magic_name__ = F'{t * (self._level-1)}{self._name}:\n'
else:
__magic_name__ = ''''''
__magic_name__ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(A , A ):
r += F'{t * (self._level)}{v}\n'
self._level += 1
else:
r += F'{t * (self._level)}{k}: {v} ({type(A ).__name__})\n'
__magic_name__ = level
return r[:-1]
@classmethod
def __A ( cls , A , **A ) -> int:
'''simple docstring'''
__magic_name__ , __magic_name__ = cls.get_config_dict(A , **A )
return cls(A )
@classmethod
def __A ( cls , A , **A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = kwargs.pop('''cache_dir''' , A )
__magic_name__ = kwargs.pop('''force_download''' , A )
__magic_name__ = kwargs.pop('''resume_download''' , A )
__magic_name__ = kwargs.pop('''proxies''' , A )
__magic_name__ = kwargs.pop('''local_files_only''' , A )
if os.path.isdir(A ):
__magic_name__ = os.path.join(A , A )
elif os.path.isfile(A ) or is_remote_url(A ):
__magic_name__ = pretrained_model_name_or_path
else:
__magic_name__ = hf_bucket_url(A , filename=A , use_cdn=A )
try:
# Load from URL or cache if already cached
__magic_name__ = cached_path(
A , cache_dir=A , force_download=A , proxies=A , resume_download=A , local_files_only=A , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__magic_name__ = Config.load_yaml(A )
except EnvironmentError:
__magic_name__ = '''Can\'t load config for'''
raise EnvironmentError(A )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(A ), kwargs
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple ):
__magic_name__ = torch.load('''dump.pt''' , map_location=in_tensor.device )
__magic_name__ = in_tensor.numpy()
__magic_name__ = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ), (
        f'{sum([1 for x in np.isclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ).flatten() if not x] )/len(na.flatten() )*100:.4f} %'
" element-wise mismatch"
)
raise Exception('''tensors are all good''' )
# Hugging face functions below
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
__magic_name__ = urlparse(snake_case_ )
return parsed.scheme in ("http", "https")
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str , snake_case_ : Optional[Any]=True ):
__magic_name__ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
__magic_name__ = '''/''' not in model_id
if legacy_format:
return f'{endpoint}/{model_id}-{filename}'
else:
return f'{endpoint}/{model_id}/{filename}'
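# Illustrative outputs of the two URL layouts above (derived from the code):
#   ('bert-base-uncased', 'config.json', use_cdn=True)
#     -> 'https://cdn.huggingface.co/bert-base-uncased-config.json'   (legacy: no '/' in model_id)
#   ('user/model', 'config.json', use_cdn=False)
#     -> 'https://s3.amazonaws.com/models.huggingface.co/bert/user/model/config.json'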
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple , snake_case_ : List[str]=None , snake_case_ : Dict=0 , snake_case_ : Tuple=None , ):
__magic_name__ = '''python/{}'''.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(snake_case_ , snake_case_ ):
ua += "; " + "; ".join('''{}/{}'''.format(snake_case_ , snake_case_ ) for k, v in user_agent.items() )
elif isinstance(snake_case_ , snake_case_ ):
ua += "; " + user_agent
__magic_name__ = {'''user-agent''': ua}
if resume_size > 0:
__magic_name__ = '''bytes=%d-''' % (resume_size,)
__magic_name__ = requests.get(snake_case_ , stream=snake_case_ , proxies=snake_case_ , headers=snake_case_ )
if response.status_code == 416: # Range not satisfiable
return
__magic_name__ = response.headers.get('''Content-Length''' )
__magic_name__ = resume_size + int(snake_case_ ) if content_length is not None else None
__magic_name__ = tqdm(
unit='''B''' , unit_scale=snake_case_ , total=snake_case_ , initial=snake_case_ , desc='''Downloading''' , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(snake_case_ ) )
temp_file.write(snake_case_ )
progress.close()
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Dict=None , snake_case_ : int=False , snake_case_ : List[Any]=None , snake_case_ : Tuple=10 , snake_case_ : int=False , snake_case_ : Any=None , snake_case_ : Tuple=False , ):
if cache_dir is None:
__magic_name__ = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
__magic_name__ = None
if not local_files_only:
try:
__magic_name__ = requests.head(snake_case_ , allow_redirects=snake_case_ , proxies=snake_case_ , timeout=snake_case_ )
if response.status_code == 200:
__magic_name__ = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__magic_name__ = url_to_filename(snake_case_ , snake_case_ )
# get cache path to put the file
__magic_name__ = os.path.join(snake_case_ , snake_case_ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(snake_case_ ):
return cache_path
else:
__magic_name__ = [
file
for file in fnmatch.filter(os.listdir(snake_case_ ) , filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(snake_case_ ) > 0:
return os.path.join(snake_case_ , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(snake_case_ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__magic_name__ = cache_path + '''.lock'''
with FileLock(snake_case_ ):
# If the download just completed while the lock was activated.
if os.path.exists(snake_case_ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__magic_name__ = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(snake_case_ , '''a+b''' ) as f:
yield f
__magic_name__ = _resumable_file_manager
if os.path.exists(snake_case_ ):
__magic_name__ = os.stat(snake_case_ ).st_size
else:
__magic_name__ = 0
else:
__magic_name__ = partial(tempfile.NamedTemporaryFile , dir=snake_case_ , delete=snake_case_ )
__magic_name__ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
            print(
                '''%s not found in cache or force_download set to True, downloading to %s''' % (snake_case_ , temp_file.name) )
http_get(
snake_case_ , snake_case_ , proxies=snake_case_ , resume_size=snake_case_ , user_agent=snake_case_ , )
os.replace(temp_file.name , snake_case_ )
__magic_name__ = {'''url''': url, '''etag''': etag}
__magic_name__ = cache_path + '''.json'''
with open(snake_case_ , '''w''' ) as meta_file:
json.dump(snake_case_ , snake_case_ )
return cache_path
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : List[Any]=None ):
__magic_name__ = url.encode('''utf-8''' )
__magic_name__ = shaaaa(snake_case_ )
__magic_name__ = url_hash.hexdigest()
if etag:
__magic_name__ = etag.encode('''utf-8''' )
__magic_name__ = shaaaa(snake_case_ )
filename += "." + etag_hash.hexdigest()
if url.endswith('''.h5''' ):
filename += ".h5"
return filename
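# Cache-naming sketch: the filename is sha256(url).hexdigest(), with
# '.' + sha256(etag).hexdigest() appended when an ETag is known, and a trailing
# '.h5' kept for URLs ending in '.h5'; entries are therefore keyed per (url, etag).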
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str=None , snake_case_ : Tuple=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[Any]=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[str]=False , snake_case_ : Optional[int]=False , snake_case_ : Optional[int]=False , ):
if cache_dir is None:
__magic_name__ = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
if is_remote_url(snake_case_ ):
# URL, so get it from the cache (downloading if necessary)
__magic_name__ = get_from_cache(
snake_case_ , cache_dir=snake_case_ , force_download=snake_case_ , proxies=snake_case_ , resume_download=snake_case_ , user_agent=snake_case_ , local_files_only=snake_case_ , )
elif os.path.exists(snake_case_ ):
# File, and it exists.
__magic_name__ = url_or_filename
elif urlparse(snake_case_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(snake_case_ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(snake_case_ ) )
if extract_compressed_file:
if not is_zipfile(snake_case_ ) and not tarfile.is_tarfile(snake_case_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__magic_name__ , __magic_name__ = os.path.split(snake_case_ )
__magic_name__ = output_file.replace('''.''' , '''-''' ) + '''-extracted'''
__magic_name__ = os.path.join(snake_case_ , snake_case_ )
if os.path.isdir(snake_case_ ) and os.listdir(snake_case_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__magic_name__ = output_path + '''.lock'''
with FileLock(snake_case_ ):
shutil.rmtree(snake_case_ , ignore_errors=snake_case_ )
os.makedirs(snake_case_ )
if is_zipfile(snake_case_ ):
with ZipFile(snake_case_ , '''r''' ) as zip_file:
zip_file.extractall(snake_case_ )
zip_file.close()
elif tarfile.is_tarfile(snake_case_ ):
__magic_name__ = tarfile.open(snake_case_ )
tar_file.extractall(snake_case_ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(snake_case_ ) )
return output_path_extracted
return output_path
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : int="," ):
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
with open(snake_case_ ) as f:
__magic_name__ = eval(f.read() )
else:
__magic_name__ = requests.get(snake_case_ )
try:
            __magic_name__ = req.json()
except Exception:
__magic_name__ = req.content.decode()
assert data is not None, "could not connect"
try:
__magic_name__ = eval(snake_case_ )
except Exception:
__magic_name__ = data.split('''\n''' )
req.close()
return data
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] ):
__magic_name__ = requests.get(snake_case_ )
__magic_name__ = np.array(Image.open(BytesIO(response.content ) ) )
return img
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] ):
__magic_name__ = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(snake_case_ )
with open(snake_case_ , '''rb''' ) as stream:
__magic_name__ = pkl.load(snake_case_ )
__magic_name__ = weights.pop('''model''' )
__magic_name__ = {}
for k, v in model.items():
__magic_name__ = torch.from_numpy(snake_case_ )
if "running_var" in k:
__magic_name__ = torch.tensor([0] )
__magic_name__ = k.replace('''running_var''' , '''num_batches_tracked''' )
__magic_name__ = zero
return new
def _SCREAMING_SNAKE_CASE ( ):
print(f'{os.path.abspath(os.path.join(snake_case_ , os.pardir ) )}/demo.ipynb' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple="RGB" ):
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
__magic_name__ = cva.imread(snake_case_ )
else:
__magic_name__ = get_image_from_url(snake_case_ )
assert img is not None, f'could not connect to: {im}'
__magic_name__ = cva.cvtColor(snake_case_ , cva.COLOR_BGR2RGB )
if input_format == "RGB":
__magic_name__ = img[:, :, ::-1]
return img
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Dict=1 ):
return (images[i : i + batch] for i in range(0 , len(snake_case_ ) , snake_case_ )) | 678 | 1 |
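# The generator above yields successive slices images[i : i + batch]; for a
# five-image list with batch=2 it produces [img0, img1], [img2, img3], [img4].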
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
a_ : List[Any] = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
_a = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_a = field(
default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} )
_a = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
_a = field(default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_a = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = field(
metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
_a = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , )
_a = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_a = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def _SCREAMING_SNAKE_CASE ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__magic_name__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__magic_name__ , __magic_name__ , __magic_name__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__magic_name__ , __magic_name__ , __magic_name__ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
''' --overwrite_output_dir to overcome.''' )
__magic_name__ = import_module('''tasks''' )
try:
__magic_name__ = getattr(snake_case_ , model_args.task_type )
__magic_name__ = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '
f'Available tasks classes are: {TokenClassificationTask.__subclasses__()}' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , snake_case_ )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
__magic_name__ = token_classification_task.get_labels(data_args.labels )
__magic_name__ = dict(enumerate(snake_case_ ) )
__magic_name__ = len(snake_case_ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__magic_name__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=snake_case_ , idalabel=snake_case_ , labelaid={label: i for i, label in enumerate(snake_case_ )} , cache_dir=model_args.cache_dir , )
__magic_name__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
__magic_name__ = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=snake_case_ , cache_dir=model_args.cache_dir , )
# Get datasets
__magic_name__ = (
TokenClassificationDataset(
token_classification_task=snake_case_ , data_dir=data_args.data_dir , tokenizer=snake_case_ , labels=snake_case_ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
__magic_name__ = (
TokenClassificationDataset(
token_classification_task=snake_case_ , data_dir=data_args.data_dir , tokenizer=snake_case_ , labels=snake_case_ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(snake_case_ : np.ndarray , snake_case_ : np.ndarray ) -> Tuple[List[int], List[int]]:
__magic_name__ = np.argmax(snake_case_ , axis=2 )
__magic_name__ , __magic_name__ = preds.shape
__magic_name__ = [[] for _ in range(snake_case_ )]
__magic_name__ = [[] for _ in range(snake_case_ )]
for i in range(snake_case_ ):
for j in range(snake_case_ ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(snake_case_ : EvalPrediction ) -> Dict:
__magic_name__ , __magic_name__ = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(snake_case_ , snake_case_ ),
"precision": precision_score(snake_case_ , snake_case_ ),
"recall": recall_score(snake_case_ , snake_case_ ),
"f1": fa_score(snake_case_ , snake_case_ ),
}
# Data collator
__magic_name__ = DataCollatorWithPadding(snake_case_ , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
__magic_name__ = Trainer(
model=snake_case_ , args=snake_case_ , train_dataset=snake_case_ , eval_dataset=snake_case_ , compute_metrics=snake_case_ , data_collator=snake_case_ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__magic_name__ = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__magic_name__ = trainer.evaluate()
__magic_name__ = os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_process_zero():
with open(snake_case_ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , snake_case_ , snake_case_ )
writer.write('''%s = %s\n''' % (key, value) )
results.update(snake_case_ )
# Predict
if training_args.do_predict:
__magic_name__ = TokenClassificationDataset(
token_classification_task=snake_case_ , data_dir=data_args.data_dir , tokenizer=snake_case_ , labels=snake_case_ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
__magic_name__ , __magic_name__ , __magic_name__ = trainer.predict(snake_case_ )
__magic_name__ , __magic_name__ = align_predictions(snake_case_ , snake_case_ )
__magic_name__ = os.path.join(training_args.output_dir , '''test_results.txt''' )
if trainer.is_world_process_zero():
with open(snake_case_ , '''w''' ) as writer:
for key, value in metrics.items():
logger.info(''' %s = %s''' , snake_case_ , snake_case_ )
writer.write('''%s = %s\n''' % (key, value) )
# Save predictions
__magic_name__ = os.path.join(training_args.output_dir , '''test_predictions.txt''' )
if trainer.is_world_process_zero():
with open(snake_case_ , '''w''' ) as writer:
with open(os.path.join(data_args.data_dir , '''test.txt''' ) , '''r''' ) as f:
token_classification_task.write_predictions_to_file(snake_case_ , snake_case_ , snake_case_ )
return results
def _SCREAMING_SNAKE_CASE ( snake_case_ : int ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 678 |
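# Example invocation (sketch; script name and data paths are placeholders, and the
# CLI flags come from the dataclass fields parsed by HfArgumentParser above):
#   python run_ner.py --model_name_or_path bert-base-cased --data_dir ./conll2003 \
#       --output_dir ./ner-out --do_train --do_eval --max_seq_length 128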
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
a_ : Optional[int] = 16
a_ : int = 32
def _SCREAMING_SNAKE_CASE ( snake_case_ : Accelerator , snake_case_ : int = 16 , snake_case_ : str = "bert-base-cased" ):
__magic_name__ = AutoTokenizer.from_pretrained(snake_case_ )
__magic_name__ = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(snake_case_ : Union[str, Any] ):
# max_length=None => use the model max length (it's actually the default)
__magic_name__ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=snake_case_ , max_length=snake_case_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__magic_name__ = datasets.map(
snake_case_ , batched=snake_case_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=snake_case_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__magic_name__ = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(snake_case_ : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(snake_case_ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(snake_case_ , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
__magic_name__ = DataLoader(
tokenized_datasets['''train'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
__magic_name__ = DataLoader(
tokenized_datasets['''validation'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
return train_dataloader, eval_dataloader
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : str ):
model.eval()
__magic_name__ = 0
for step, batch in enumerate(snake_case_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__magic_name__ = model(**snake_case_ )
__magic_name__ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__magic_name__ , __magic_name__ = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(snake_case_ ) - 1:
__magic_name__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__magic_name__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=snake_case_ , references=snake_case_ , )
__magic_name__ = metric.compute()
return eval_metric["accuracy"]
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Tuple ):
# Initialize accelerator
__magic_name__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__magic_name__ = config['''lr''']
__magic_name__ = int(config['''num_epochs'''] )
__magic_name__ = int(config['''seed'''] )
__magic_name__ = int(config['''batch_size'''] )
__magic_name__ = args.model_name_or_path
set_seed(snake_case_ )
__magic_name__ , __magic_name__ = get_dataloaders(snake_case_ , snake_case_ , snake_case_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__magic_name__ = AutoModelForSequenceClassification.from_pretrained(snake_case_ , return_dict=snake_case_ )
# Instantiate optimizer
__magic_name__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__magic_name__ = optimizer_cls(params=model.parameters() , lr=snake_case_ )
if accelerator.state.deepspeed_plugin is not None:
__magic_name__ = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
__magic_name__ = 1
__magic_name__ = (len(snake_case_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__magic_name__ = get_linear_schedule_with_warmup(
optimizer=snake_case_ , num_warmup_steps=0 , num_training_steps=snake_case_ , )
else:
__magic_name__ = DummyScheduler(snake_case_ , total_num_steps=snake_case_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# We need to keep track of how many total steps we have iterated over
__magic_name__ = 0
# We also need to keep track of the stating epoch so files are named properly
__magic_name__ = 0
__magic_name__ = evaluate.load('''glue''' , '''mrpc''' )
__magic_name__ = num_epochs
if args.partial_train_epoch is not None:
__magic_name__ = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
__magic_name__ = args.resume_from_checkpoint.split('''epoch_''' )[1]
__magic_name__ = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
__magic_name__ = int(snake_case_ ) + 1
__magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
accelerator.print('''resumed checkpoint performance:''' , snake_case_ )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
        accelerator.print('''resumed optimizer\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , '''r''' ) as f:
__magic_name__ = json.load(snake_case_ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
__magic_name__ = {}
for epoch in range(snake_case_ , snake_case_ ):
model.train()
for step, batch in enumerate(snake_case_ ):
__magic_name__ = model(**snake_case_ )
__magic_name__ = outputs.loss
__magic_name__ = loss / gradient_accumulation_steps
accelerator.backward(snake_case_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
__magic_name__ = f'epoch_{epoch}'
__magic_name__ = os.path.join(args.output_dir , snake_case_ )
accelerator.save_state(snake_case_ )
__magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
__magic_name__ = accuracy
__magic_name__ = lr_scheduler.get_lr()[0]
__magic_name__ = optimizer.param_groups[0]['''lr''']
__magic_name__ = epoch
__magic_name__ = overall_step
accelerator.print(f'epoch {epoch}:' , snake_case_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , '''w''' ) as f:
json.dump(snake_case_ , snake_case_ )
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=snake_case_ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=snake_case_ , )
parser.add_argument(
'''--output_dir''' , type=snake_case_ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=snake_case_ , default=snake_case_ , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--partial_train_epoch''' , type=snake_case_ , default=snake_case_ , help='''If passed, the training will stop after this number of epochs.''' , )
parser.add_argument(
'''--num_epochs''' , type=snake_case_ , default=2 , help='''Number of train epochs.''' , )
__magic_name__ = parser.parse_args()
__magic_name__ = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(snake_case_ , snake_case_ )
if __name__ == "__main__":
main() | 678 | 1 |
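# Example invocation (sketch; file names are placeholders):
#   accelerate launch checkpointing.py --model_name_or_path bert-base-cased \
#       --num_epochs 2 --output_dir ./ckpts
#   # resume later from a saved state folder:
#   accelerate launch checkpointing.py --resume_from_checkpoint ./ckpts/epoch_0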
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = 42
@flax_register_to_config
class SCREAMING_SNAKE_CASE_ ( nn.Module , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = 32
_a = 4
_a = 4
_a = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_a = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
_a = False
_a = (320, 640, 1280, 1280)
_a = 2
_a = 8
_a = None
_a = 1280
_a = 0.0
_a = False
_a = jnp.floataa
_a = True
_a = 0
_a = False
def __A ( self , A ) -> FrozenDict:
'''simple docstring'''
__magic_name__ = (1, self.in_channels, self.sample_size, self.sample_size)
__magic_name__ = jnp.zeros(A , dtype=jnp.floataa )
__magic_name__ = jnp.ones((1,) , dtype=jnp.intaa )
__magic_name__ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
__magic_name__ , __magic_name__ = jax.random.split(A )
__magic_name__ = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(A , A , A , A )["params"]
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.block_out_channels
__magic_name__ = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__magic_name__ = self.num_attention_heads or self.attention_head_dim
# input
__magic_name__ = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
__magic_name__ = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
__magic_name__ = FlaxTimestepEmbedding(A , dtype=self.dtype )
__magic_name__ = self.only_cross_attention
if isinstance(A , A ):
__magic_name__ = (only_cross_attention,) * len(self.down_block_types )
if isinstance(A , A ):
__magic_name__ = (num_attention_heads,) * len(self.down_block_types )
# down
__magic_name__ = []
__magic_name__ = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
__magic_name__ = output_channel
__magic_name__ = block_out_channels[i]
__magic_name__ = i == len(A ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__magic_name__ = FlaxCrossAttnDownBlockaD(
in_channels=A , out_channels=A , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__magic_name__ = FlaxDownBlockaD(
in_channels=A , out_channels=A , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(A )
__magic_name__ = down_blocks
# mid
__magic_name__ = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
__magic_name__ = []
__magic_name__ = list(reversed(A ) )
__magic_name__ = list(reversed(A ) )
__magic_name__ = list(reversed(A ) )
__magic_name__ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
__magic_name__ = output_channel
__magic_name__ = reversed_block_out_channels[i]
__magic_name__ = reversed_block_out_channels[min(i + 1 , len(A ) - 1 )]
__magic_name__ = i == len(A ) - 1
if up_block_type == "CrossAttnUpBlock2D":
__magic_name__ = FlaxCrossAttnUpBlockaD(
in_channels=A , out_channels=A , prev_output_channel=A , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__magic_name__ = FlaxUpBlockaD(
in_channels=A , out_channels=A , prev_output_channel=A , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(A )
__magic_name__ = output_channel
__magic_name__ = up_blocks
# out
__magic_name__ = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
__magic_name__ = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , A , A , A , A=None , A=None , A = True , A = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
'''simple docstring'''
if not isinstance(A , jnp.ndarray ):
__magic_name__ = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(A , jnp.ndarray ) and len(timesteps.shape ) == 0:
__magic_name__ = timesteps.astype(dtype=jnp.floataa )
__magic_name__ = jnp.expand_dims(A , 0 )
__magic_name__ = self.time_proj(A )
__magic_name__ = self.time_embedding(A )
# 2. pre-process
__magic_name__ = jnp.transpose(A , (0, 2, 3, 1) )
__magic_name__ = self.conv_in(A )
# 3. down
__magic_name__ = (sample,)
for down_block in self.down_blocks:
if isinstance(A , A ):
__magic_name__ , __magic_name__ = down_block(A , A , A , deterministic=not train )
else:
__magic_name__ , __magic_name__ = down_block(A , A , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
__magic_name__ = ()
for down_block_res_sample, down_block_additional_residual in zip(
A , A ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
__magic_name__ = new_down_block_res_samples
# 4. mid
__magic_name__ = self.mid_block(A , A , A , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
__magic_name__ = down_block_res_samples[-(self.layers_per_block + 1) :]
__magic_name__ = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(A , A ):
__magic_name__ = up_block(
A , temb=A , encoder_hidden_states=A , res_hidden_states_tuple=A , deterministic=not train , )
else:
__magic_name__ = up_block(A , temb=A , res_hidden_states_tuple=A , deterministic=not train )
# 6. post-process
__magic_name__ = self.conv_norm_out(A )
__magic_name__ = nn.silu(A )
__magic_name__ = self.conv_out(A )
__magic_name__ = jnp.transpose(A , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=A ) | 678 |
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
return " ".join(
''''''.join(word[::-1] ) if len(snake_case_ ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('Hey wollef sroirraw')) | 678 | 1 |
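# Worked example: words longer than four characters are reversed, shorter ones
# kept, so 'Hey wollef sroirraw' becomes 'Hey fellow warriors'.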
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = """EncodecFeatureExtractor"""
_a = ("""T5Tokenizer""", """T5TokenizerFast""")
def __init__( self , A , A ) -> Tuple:
'''simple docstring'''
super().__init__(A , A )
__magic_name__ = self.feature_extractor
__magic_name__ = False
def __A ( self , A=None , A=None , A=True ) -> Union[str, Any]:
'''simple docstring'''
return self.tokenizer.get_decoder_prompt_ids(task=A , language=A , no_timestamps=A )
def __call__( self , *A , **A ) -> Union[str, Any]:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*A , **A )
__magic_name__ = kwargs.pop('''audio''' , A )
__magic_name__ = kwargs.pop('''sampling_rate''' , A )
__magic_name__ = kwargs.pop('''text''' , A )
if len(A ) > 0:
__magic_name__ = args[0]
__magic_name__ = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if text is not None:
__magic_name__ = self.tokenizer(A , **A )
if audio is not None:
__magic_name__ = self.feature_extractor(A , *A , sampling_rate=A , **A )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
__magic_name__ = audio_inputs['''input_values''']
if "padding_mask" in audio_inputs:
__magic_name__ = audio_inputs['''padding_mask''']
return inputs
def __A ( self , *A , **A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = kwargs.pop('''audio''' , A )
__magic_name__ = kwargs.pop('''padding_mask''' , A )
if len(A ) > 0:
__magic_name__ = args[0]
__magic_name__ = args[1:]
if audio_values is not None:
return self._decode_audio(A , padding_mask=A )
else:
return self.tokenizer.batch_decode(*A , **A )
def __A ( self , *A , **A ) -> int:
'''simple docstring'''
return self.tokenizer.decode(*A , **A )
def __A ( self , A , A = None ) -> List[np.ndarray]:
'''simple docstring'''
__magic_name__ = to_numpy(A )
__magic_name__ , __magic_name__ , __magic_name__ = audio_values.shape
if padding_mask is None:
return list(A )
__magic_name__ = to_numpy(A )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
__magic_name__ = seq_len - padding_mask.shape[-1]
__magic_name__ = 1 - self.feature_extractor.padding_value
__magic_name__ = np.pad(A , ((0, 0), (0, difference)) , '''constant''' , constant_values=A )
__magic_name__ = audio_values.tolist()
for i in range(A ):
__magic_name__ = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
__magic_name__ = sliced_audio.reshape(A , -1 )
        return audio_values
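# Hedged numpy sketch of the padding trick used in _decode_audio above. The
# mask length and the padding_value of 0.0 are illustrative assumptions, not
# values taken from this file:
#
#     import numpy as np
#     padding_mask = np.array([[1, 1, 0]])  # 1 = real audio, 0 = padding
#     padding_mask = np.pad(padding_mask, ((0, 0), (0, 2)), "constant", constant_values=1)
#     # -> [[1, 1, 0, 1, 1]]: the generated tail is padded with the *non*-padding
#     # value, so slicing on mask != padding_value keeps the freshly generated frames.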
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
a_ : Any = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
a_ : int = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metric is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
a_ : List[str] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
"""simple docstring"""
def __A ( self ) -> List[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
def __A ( self , A , A , A=None , A=None , A=None , A=None , A="auto" , A=-1 , A=0.9 , A=5 , A=5_00 , A="gpt2-large" , A=-1 , A=10_24 , A=25 , A=5 , A=True , A=25 , ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = compute_mauve(
p_text=A , q_text=A , p_features=A , q_features=A , p_tokens=A , q_tokens=A , num_buckets=A , pca_max_data=A , kmeans_explained_var=A , kmeans_num_redo=A , kmeans_max_iter=A , featurize_model_name=A , device_id=A , max_text_length=A , divergence_curve_discretization_size=A , mauve_scaling_factor=A , verbose=A , seed=A , )
        return out
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
a_ : Any = logging.get_logger(__name__)
a_ : List[str] = 'T5Config'
def _SCREAMING_SNAKE_CASE ( snake_case_ : jnp.array , snake_case_ : int , snake_case_ : int ):
__magic_name__ = jnp.zeros_like(snake_case_ )
__magic_name__ = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
    __magic_name__ = shifted_input_ids.at[:, 0].set(decoder_start_token_id )
    __magic_name__ = jnp.where(shifted_input_ids == -100 , pad_token_id , shifted_input_ids )
return shifted_input_ids
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = """mt5"""
_a = MTaConfig
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = """mt5"""
_a = MTaConfig
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = """mt5"""
    _a = MTaConfig
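if __name__ == "__main__":
    # Hedged sanity sketch of the label-shifting helper above. The example ids,
    # pad_token_id=1 and decoder_start_token_id=0 are illustrative assumptions,
    # not values taken from this module.
    labels = jnp.array([[5, -100, 7]] )
    shifted = jnp.zeros_like(labels ).at[:, 1:].set(labels[:, :-1] ).at[:, 0].set(0 )
    shifted = jnp.where(shifted == -100 , 1 , shifted )
    print(shifted )  # [[0 5 1]]: start token prepended, -100 mapped to the pad id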
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
a_ : Tuple = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
a_ : List[str] = typing.Union[np.floataa, int, float] # noqa: UP007
def _SCREAMING_SNAKE_CASE ( snake_case_ : Vector , snake_case_ : Vector ):
    return np.sqrt(np.sum((np.asarray(vector_a ) - np.asarray(vector_b )) ** 2 ) )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Vector , snake_case_ : Vector ):
    return sum((va - vb) ** 2 for va, vb in zip(vector_a , vector_b ) ) ** (1 / 2)
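# Both implementations agree on a hand-checkable case (illustrative values):
# euclidean_distance([1, 2, 3], [4, 5, 6]) = sqrt(9 + 9 + 9) = sqrt(27) ~ 5.196.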
if __name__ == "__main__":
def _SCREAMING_SNAKE_CASE ( ):
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=1_0000 , globals=globals() , ) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=1_0000 , globals=globals() , ) )
    benchmark()
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
a_ : List[Any] = False
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
__magic_name__ = '''A painting of a squirrel eating a burger '''
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = pipe(
prompt=A , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(A )
__magic_name__ = VersatileDiffusionTextToImagePipeline.from_pretrained(A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
__magic_name__ = generator.manual_seed(0 )
__magic_name__ = pipe(
prompt=A , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = VersatileDiffusionTextToImagePipeline.from_pretrained(
'''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
__magic_name__ = '''A painting of a squirrel eating a burger '''
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = pipe(
prompt=A , generator=A , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
__magic_name__ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
__magic_name__ = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
a_ : str = logging.get_logger(__name__)
a_ : Union[str, Any] = 'https://openaipublic.azureedge.net/jukebox/models/'
a_ : List[Any] = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10:
__magic_name__ = key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
if "conditioner_blocks.0." in key:
__magic_name__ = key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
if "prime_prior" in key:
__magic_name__ = key.replace('''prime_prior''' , '''encoder''' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
__magic_name__ = key.replace('''.emb.''' , '''.''' )
if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('''.k''' , '''.codebook''' )
if "y_emb." in key:
return key.replace('''y_emb.''' , '''metadata_embedding.''' )
if "x_emb.emb." in key:
__magic_name__ = key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
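# Illustrative renames implied by the rules above (read off the code, not an
# exhaustive mapping): a sufficiently deep "....model.1.weight" key becomes
# "....conv1d_1.weight", "prime_prior" becomes "encoder", and "prior.x_out"
# becomes "fc_proj_out".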
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] ):
__magic_name__ = {}
import re
__magic_name__ = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
__magic_name__ = re.compile(
r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
__magic_name__ = re.compile(
r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
__magic_name__ = re.compile(
r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__magic_name__ = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_conv_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] )
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_encoder_block_conv_in.sub(snake_case_ , snake_case_ )
elif re_encoder_block_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] )
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_encoder_block_resnet.sub(snake_case_ , snake_case_ )
elif re_encoder_block_proj_out.fullmatch(snake_case_ ):
__magic_name__ = re_encoder_block_proj_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
__magic_name__ = re_encoder_block_proj_out.sub(snake_case_ , snake_case_ )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_conv_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) - 2
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_decoder_block_conv_out.sub(snake_case_ , snake_case_ )
elif re_decoder_block_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) - 2
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_decoder_block_resnet.sub(snake_case_ , snake_case_ )
elif re_decoder_block_proj_in.fullmatch(snake_case_ ):
__magic_name__ = re_decoder_block_proj_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
__magic_name__ = re_decoder_block_proj_in.sub(snake_case_ , snake_case_ )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_conv_out.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[1] ) * 2 + int(groups[2] ) - 2
__magic_name__ = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
__magic_name__ = re_prior_cond_conv_out.sub(snake_case_ , snake_case_ )
elif re_prior_cond_resnet.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_resnet.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = int(groups[1] ) * 2 + int(groups[2] ) - 2
__magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]]
__magic_name__ = f'conditioner_blocks.upsampler.upsample_block.{block_index}.'
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__magic_name__ = prefix + resnet_block
__magic_name__ = re_prior_cond_resnet.sub(snake_case_ , snake_case_ )
elif re_prior_cond_proj_in.fullmatch(snake_case_ ):
__magic_name__ = re_prior_cond_proj_in.match(snake_case_ )
__magic_name__ = regex_match.groups()
__magic_name__ = f'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
__magic_name__ = re_prior_cond_proj_in.sub(snake_case_ , snake_case_ )
# keep original key
else:
__magic_name__ = original_key
__magic_name__ = replace_key(snake_case_ )
if f'{key_prefix}.{key}' not in model_state_dict or key is None:
print(f'failed converting {original_key} to {key}, does not match' )
        # handle mismatched shapes
elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape:
__magic_name__ = model_state_dict[f'{key_prefix}.{key}']
            print(f'{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match' )
__magic_name__ = original_key
__magic_name__ = original_key
__magic_name__ = value
return new_dict
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict=None , snake_case_ : Any=None ):
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ):
__magic_name__ = requests.get(f'{PREFIX}{file}' , allow_redirects=snake_case_ )
os.makedirs(f'{pytorch_dump_folder_path}/' , exist_ok=snake_case_ )
open(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' , '''wb''' ).write(r.content )
__magic_name__ = MODEL_MAPPING[model_name.split('''/''' )[-1]]
__magic_name__ = JukeboxConfig.from_pretrained(snake_case_ )
__magic_name__ = JukeboxModel(snake_case_ )
__magic_name__ = []
__magic_name__ = {}
for i, dict_name in enumerate(snake_case_ ):
__magic_name__ = torch.load(f'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}' )['''model''']
__magic_name__ = {}
for k in old_dic.keys():
if k.endswith('''.b''' ):
__magic_name__ = old_dic[k]
elif k.endswith('''.w''' ):
__magic_name__ = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
__magic_name__ = old_dic[k]
else:
__magic_name__ = old_dic[k]
__magic_name__ = '''vqvae''' if i == 0 else f'priors.{3 - i}'
__magic_name__ = fix_jukebox_keys(snake_case_ , model.state_dict() , snake_case_ , snake_case_ )
weight_dict.append(snake_case_ )
__magic_name__ = weight_dict.pop(0 )
model.vqvae.load_state_dict(snake_case_ )
for i in range(len(snake_case_ ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
with open(f'{pytorch_dump_folder_path}/mapping.json' , '''w''' ) as txtfile:
json.dump(snake_case_ , snake_case_ )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case_ )
return weight_dict
if __name__ == "__main__":
a_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
a_ : int = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
def _SCREAMING_SNAKE_CASE ( snake_case_ : int ):
__magic_name__ = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def _SCREAMING_SNAKE_CASE ( snake_case_ : int ):
__magic_name__ = 0
while number > 0:
__magic_name__ = number % 10
sum_of_digits += last_digit
__magic_name__ = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def _SCREAMING_SNAKE_CASE ( snake_case_ : int = 100 ):
__magic_name__ = factorial(snake_case_ )
__magic_name__ = split_and_add(snake_case_ )
return result
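# Worked example: solution(10) computes 10! = 3628800, whose digits sum to
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.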
if __name__ == "__main__":
    print(solution(int(input('Enter the Number: ').strip())))
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a_ : int = logging.get_logger(__name__)
a_ : Optional[int] = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = """table-transformer"""
_a = ["""past_key_values"""]
_a = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , A=True , A=None , A=3 , A=1_00 , A=6 , A=20_48 , A=8 , A=6 , A=20_48 , A=8 , A=0.0 , A=0.0 , A=True , A="relu" , A=2_56 , A=0.1 , A=0.0 , A=0.0 , A=0.02 , A=1.0 , A=False , A="sine" , A="resnet50" , A=True , A=False , A=1 , A=5 , A=2 , A=1 , A=1 , A=5 , A=2 , A=0.1 , **A , ) -> Any:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__magic_name__ = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(A , A ):
__magic_name__ = backbone_config.get('''model_type''' )
__magic_name__ = CONFIG_MAPPING[backbone_model_type]
__magic_name__ = config_class.from_dict(A )
# set timm attributes to None
__magic_name__ , __magic_name__ , __magic_name__ = None, None, None
__magic_name__ = use_timm_backbone
__magic_name__ = backbone_config
__magic_name__ = num_channels
__magic_name__ = num_queries
__magic_name__ = d_model
__magic_name__ = encoder_ffn_dim
__magic_name__ = encoder_layers
__magic_name__ = encoder_attention_heads
__magic_name__ = decoder_ffn_dim
__magic_name__ = decoder_layers
__magic_name__ = decoder_attention_heads
__magic_name__ = dropout
__magic_name__ = attention_dropout
__magic_name__ = activation_dropout
__magic_name__ = activation_function
__magic_name__ = init_std
__magic_name__ = init_xavier_std
__magic_name__ = encoder_layerdrop
__magic_name__ = decoder_layerdrop
__magic_name__ = encoder_layers
__magic_name__ = auxiliary_loss
__magic_name__ = position_embedding_type
__magic_name__ = backbone
__magic_name__ = use_pretrained_backbone
__magic_name__ = dilation
# Hungarian matcher
__magic_name__ = class_cost
__magic_name__ = bbox_cost
__magic_name__ = giou_cost
# Loss coefficients
__magic_name__ = mask_loss_coefficient
__magic_name__ = dice_loss_coefficient
__magic_name__ = bbox_loss_coefficient
__magic_name__ = giou_loss_coefficient
__magic_name__ = eos_coefficient
super().__init__(is_encoder_decoder=A , **A )
@property
def __A ( self ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def __A ( self ) -> int:
'''simple docstring'''
return self.d_model
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = version.parse("""1.11""" )
@property
def __A ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def __A ( self ) -> float:
'''simple docstring'''
return 1E-5
@property
def __A ( self ) -> int:
'''simple docstring'''
        return 12
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Optional[int] = logging.get_logger(__name__)
a_ : Tuple = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = """glpn"""
def __init__( self , A=3 , A=4 , A=[2, 2, 2, 2] , A=[8, 4, 2, 1] , A=[32, 64, 1_60, 2_56] , A=[7, 3, 3, 3] , A=[4, 2, 2, 2] , A=[1, 2, 5, 8] , A=[4, 4, 4, 4] , A="gelu" , A=0.0 , A=0.0 , A=0.02 , A=0.1 , A=1E-6 , A=64 , A=10 , A=-1 , **A , ) -> int:
'''simple docstring'''
super().__init__(**A )
__magic_name__ = num_channels
__magic_name__ = num_encoder_blocks
__magic_name__ = depths
__magic_name__ = sr_ratios
__magic_name__ = hidden_sizes
__magic_name__ = patch_sizes
__magic_name__ = strides
__magic_name__ = mlp_ratios
__magic_name__ = num_attention_heads
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = initializer_range
__magic_name__ = drop_path_rate
__magic_name__ = layer_norm_eps
__magic_name__ = decoder_hidden_size
__magic_name__ = max_depth
        __magic_name__ = head_in_index
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] ):
# Initialise PyTorch model
__magic_name__ = LxmertConfig.from_json_file(snake_case_ )
print(f'Building PyTorch model from configuration: {config}' )
__magic_name__ = LxmertForPreTraining(snake_case_ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(snake_case_ , snake_case_ , snake_case_ )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , snake_case_ )
if __name__ == "__main__":
a_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ : Optional[Any] = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def _SCREAMING_SNAKE_CASE ( snake_case_ : SplitDict ):
__magic_name__ = split_dict._to_yaml_list()
    assert len(split_dict ) == len(split_dict_yaml_list )
    __magic_name__ = SplitDict._from_yaml_list(split_dict_yaml_list )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
__magic_name__ = None
# the split name of split_dict takes over the name of the split info object
__magic_name__ = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'''split_info''' , [SplitInfo(), SplitInfo(dataset_name=snake_case_ ), SplitInfo(dataset_name='''my_dataset''' )] )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple ):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
__magic_name__ = asdict(SplitDict({'''train''': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : Union[str, Any] ):
__magic_name__ = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
__magic_name__ = {
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
__magic_name__ = f'{src_lang}-{tgt_lang}'
__magic_name__ = f'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'
model_card_dir.mkdir(parents=snake_case_ , exist_ok=snake_case_ )
__magic_name__ = os.path.join(snake_case_ , '''README.md''' )
print(f'Generating {path}' )
with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(snake_case_ )
# make sure we are under the root of the project
a_ : Tuple = Path(__file__).resolve().parent.parent.parent
a_ : Dict = repo_dir / 'model_cards'
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
a_ : List[str] = model_cards_dir / 'allenai' / model_name
    write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name)
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
a_ : Tuple = sys.version_info >= (3, 10)
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any]=None , snake_case_ : Union[str, Any]=None ):
return field(default_factory=lambda: default , metadata=snake_case_ )
@dataclass
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = 42
_a = 42
_a = 42
_a = 42
@dataclass
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = 42
_a = field(default="""toto""" , metadata={"""help""": """help message"""} )
@dataclass
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = False
_a = True
_a = None
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = """titi"""
_a = """toto"""
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = """titi"""
_a = """toto"""
_a = 42
@dataclass
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = "toto"
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = BasicEnum(self.foo )
@dataclass
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = "toto"
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = MixedTypeEnum(self.foo )
@dataclass
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = None
_a = field(default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """help message"""} )
_a = None
_a = list_field(default=[] )
_a = list_field(default=[] )
@dataclass
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = list_field(default=[] )
_a = list_field(default=[1, 2, 3] )
_a = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )
_a = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = field()
_a = field()
_a = field()
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = BasicEnum(self.required_enum )
@dataclass
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = 42
_a = field()
_a = None
_a = field(default="""toto""" , metadata={"""help""": """help message"""} )
_a = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )
if is_python_no_less_than_3_10:
@dataclass
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = False
_a = True
_a = None
@dataclass
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = None
_a = field(default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """help message"""} )
_a = None
_a = list_field(default=[] )
_a = list_field(default=[] )
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self , A , A ) -> Tuple:
'''simple docstring'''
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
__magic_name__ = {k: v for k, v in vars(A ).items() if k != '''container'''}
__magic_name__ = {k: v for k, v in vars(A ).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('''choices''' , A ) and yy.get('''choices''' , A ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['''type'''](A ) , yy['''type'''](A ) )
del xx["type"], yy["type"]
self.assertEqual(A , A )
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = HfArgumentParser(A )
__magic_name__ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=A , required=A )
expected.add_argument('''--bar''' , type=A , required=A )
expected.add_argument('''--baz''' , type=A , required=A )
expected.add_argument('''--flag''' , type=A , default=A , const=A , nargs='''?''' )
self.argparsersEqual(A , A )
__magic_name__ = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
((__magic_name__) , ) = parser.parse_args_into_dataclasses(A , look_for_args_file=A )
self.assertFalse(example.flag )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = HfArgumentParser(A )
__magic_name__ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=42 , type=A )
expected.add_argument('''--baz''' , default='''toto''' , type=A , help='''help message''' )
self.argparsersEqual(A , A )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=A , default=A , const=A , nargs='''?''' )
expected.add_argument('''--baz''' , type=A , default=A , const=A , nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=A , dest='''baz''' )
expected.add_argument('''--opt''' , type=A , default=A )
__magic_name__ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(A )
for dataclass_type in dataclass_types:
__magic_name__ = HfArgumentParser(A )
self.argparsersEqual(A , A )
__magic_name__ = parser.parse_args([] )
self.assertEqual(A , Namespace(foo=A , baz=A , opt=A ) )
__magic_name__ = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(A , Namespace(foo=A , baz=A , opt=A ) )
__magic_name__ = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(A , Namespace(foo=A , baz=A , opt=A ) )
__magic_name__ = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(A , Namespace(foo=A , baz=A , opt=A ) )
__magic_name__ = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(A , Namespace(foo=A , baz=A , opt=A ) )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = HfArgumentParser(A )
__magic_name__ = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(A , A )
__magic_name__ = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
__magic_name__ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
__magic_name__ = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
__magic_name__ = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
__magic_name__ = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
__magic_name__ = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def __A ( self ) -> str:
'''simple docstring'''
@dataclass
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = "toto"
__magic_name__ = HfArgumentParser(A )
__magic_name__ = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(A , A )
__magic_name__ = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
__magic_name__ = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
__magic_name__ = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = HfArgumentParser(A )
__magic_name__ = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=A )
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=A )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=A )
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=A )
self.argparsersEqual(A , A )
__magic_name__ = parser.parse_args([] )
self.assertEqual(
A , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
__magic_name__ = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
self.assertEqual(A , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=A , type=A )
expected.add_argument('''--bar''' , default=A , type=A , help='''help message''' )
expected.add_argument('''--baz''' , default=A , type=A )
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=A )
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=A )
__magic_name__ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(A )
for dataclass_type in dataclass_types:
__magic_name__ = HfArgumentParser(A )
self.argparsersEqual(A , A )
__magic_name__ = parser.parse_args([] )
self.assertEqual(A , Namespace(foo=A , bar=A , baz=A , ces=[] , des=[] ) )
__magic_name__ = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(A , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = HfArgumentParser(A )
__magic_name__ = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=A , required=A )
expected.add_argument('''--required_str''' , type=A , required=A )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=A , )
self.argparsersEqual(A , A )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = HfArgumentParser(A )
__magic_name__ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=A , required=A )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=A , )
expected.add_argument('''--opt''' , type=A , default=A )
expected.add_argument('''--baz''' , default='''toto''' , type=A , help='''help message''' )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=A )
self.argparsersEqual(A , A )
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = HfArgumentParser(A )
__magic_name__ = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
__magic_name__ = parser.parse_dict(A )[0]
__magic_name__ = BasicExample(**A )
self.assertEqual(A , A )
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = HfArgumentParser(A )
__magic_name__ = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 42,
}
self.assertRaises(A , parser.parse_dict , A , allow_extra_keys=A )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = HfArgumentParser(A )
__magic_name__ = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__magic_name__ = os.path.join(A , '''temp_json''' )
os.mkdir(A )
with open(temp_local_path + '''.json''' , '''w+''' ) as f:
json.dump(A , A )
__magic_name__ = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0]
__magic_name__ = BasicExample(**A )
self.assertEqual(A , A )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = HfArgumentParser(A )
__magic_name__ = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__magic_name__ = os.path.join(A , '''temp_yaml''' )
os.mkdir(A )
with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
yaml.dump(A , A )
__magic_name__ = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
__magic_name__ = BasicExample(**A )
self.assertEqual(A , A )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = HfArgumentParser(A )
        self.assertIsNotNone(A )
def _SCREAMING_SNAKE_CASE ( snake_case_ : list[int] , snake_case_ : list[int] ):
__magic_name__ = len(snake_case_ )
print('''The following activities are selected:''' )
# The first activity is always selected
__magic_name__ = 0
print(snake_case_ , end=''',''' )
# Consider rest of the activities
for j in range(snake_case_ ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(snake_case_ , end=''',''' )
__magic_name__ = j
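# Worked example with the arrays below (the greedy choice assumes activities
# are already sorted by finish time): start=[1, 3, 0, 5, 8, 5],
# finish=[2, 4, 6, 7, 9, 9] selects activities 0, 1, 3, 4.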
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ : Dict = [1, 3, 0, 5, 8, 5]
a_ : Union[str, Any] = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
__magic_name__ = dict(zip(A , range(len(A ) ) ) )
__magic_name__ = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
__magic_name__ = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 1_60_00,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
__magic_name__ = tempfile.mkdtemp()
__magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__magic_name__ = os.path.join(self.tmpdirname , A )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A ) + '''\n''' )
# load decoder from hub
__magic_name__ = '''hf-internal-testing/ngram-beam-search-decoder'''
def __A ( self , **A ) -> str:
'''simple docstring'''
__magic_name__ = self.add_kwargs_tokens_map.copy()
kwargs.update(A )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **A )
def __A ( self , **A ) -> Tuple:
'''simple docstring'''
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **A )
def __A ( self , **A ) -> int:
'''simple docstring'''
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **A )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = self.get_tokenizer()
__magic_name__ = self.get_feature_extractor()
__magic_name__ = self.get_decoder()
__magic_name__ = WavaVecaProcessorWithLM(tokenizer=A , feature_extractor=A , decoder=A )
processor.save_pretrained(self.tmpdirname )
__magic_name__ = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , A )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , A )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__magic_name__ = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(A , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = self.get_feature_extractor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = self.get_decoder()
__magic_name__ = WavaVecaProcessorWithLM(tokenizer=A , feature_extractor=A , decoder=A )
__magic_name__ = floats_list((3, 10_00) )
__magic_name__ = feature_extractor(A , return_tensors='''np''' )
__magic_name__ = processor(A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.get_feature_extractor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = self.get_decoder()
__magic_name__ = WavaVecaProcessorWithLM(tokenizer=A , feature_extractor=A , decoder=A )
__magic_name__ = '''This is a test string'''
__magic_name__ = processor(text=A )
__magic_name__ = tokenizer(A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __A ( self , A=(2, 10, 16) , A=77 ) -> int:
'''simple docstring'''
np.random.seed(A )
return np.random.rand(*A )
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = self.get_feature_extractor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = self.get_decoder()
__magic_name__ = WavaVecaProcessorWithLM(tokenizer=A , feature_extractor=A , decoder=A )
__magic_name__ = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__magic_name__ = processor.decode(A )
__magic_name__ = decoder.decode_beams(A )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def __A ( self , A ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.get_feature_extractor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = self.get_decoder()
__magic_name__ = WavaVecaProcessorWithLM(tokenizer=A , feature_extractor=A , decoder=A )
__magic_name__ = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__magic_name__ = processor.batch_decode(A )
else:
with get_context(A ).Pool() as pool:
__magic_name__ = processor.batch_decode(A , A )
__magic_name__ = list(A )
with get_context('''fork''' ).Pool() as p:
__magic_name__ = decoder.decode_beams_batch(A , A )
__magic_name__ , __magic_name__ , __magic_name__ = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(A , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(A , decoded_processor.logit_score )
self.assertListEqual(A , decoded_processor.lm_score )
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.get_feature_extractor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = self.get_decoder()
__magic_name__ = WavaVecaProcessorWithLM(tokenizer=A , feature_extractor=A , decoder=A )
__magic_name__ = self._get_dummy_logits()
__magic_name__ = 15
__magic_name__ = -20.0
__magic_name__ = -4.0
__magic_name__ = processor.batch_decode(
A , beam_width=A , beam_prune_logp=A , token_min_logp=A , )
__magic_name__ = decoded_processor_out.text
__magic_name__ = list(A )
with get_context('''fork''' ).Pool() as pool:
__magic_name__ = decoder.decode_beams_batch(
A , A , beam_width=A , beam_prune_logp=A , token_min_logp=A , )
__magic_name__ = [d[0][0] for d in decoded_decoder_out]
__magic_name__ = [d[0][2] for d in decoded_decoder_out]
__magic_name__ = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(A , A )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , A )
self.assertTrue(np.array_equal(A , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.0_54, -18.4_47] , A , atol=1E-3 ) )
self.assertTrue(np.array_equal(A , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.5_54, -13.94_74] , A , atol=1E-3 ) )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = self.get_feature_extractor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = self.get_decoder()
__magic_name__ = WavaVecaProcessorWithLM(tokenizer=A , feature_extractor=A , decoder=A )
__magic_name__ = self._get_dummy_logits()
__magic_name__ = 2.0
__magic_name__ = 5.0
__magic_name__ = -20.0
__magic_name__ = True
__magic_name__ = processor.batch_decode(
A , alpha=A , beta=A , unk_score_offset=A , lm_score_boundary=A , )
__magic_name__ = decoded_processor_out.text
__magic_name__ = list(A )
decoder.reset_params(
alpha=A , beta=A , unk_score_offset=A , lm_score_boundary=A , )
with get_context('''fork''' ).Pool() as pool:
__magic_name__ = decoder.decode_beams_batch(
A , A , )
__magic_name__ = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(A , A )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , A )
__magic_name__ = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , A )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__magic_name__ = processor.decoder.model_container[processor.decoder._model_key]
__magic_name__ = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
__magic_name__ = os.listdir(A )
__magic_name__ = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
        # test that only decoder-relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(A , A )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = snapshot_download('''hf-internal-testing/processor_with_lm''' )
__magic_name__ = WavaVecaProcessorWithLM.from_pretrained(A )
__magic_name__ = processor.decoder.model_container[processor.decoder._model_key]
__magic_name__ = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
__magic_name__ = os.listdir(A )
__magic_name__ = os.listdir(A )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that the decoder files from the hub and the local files in the cache are the same
self.assertListEqual(A , A )
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__magic_name__ = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__magic_name__ = floats_list((3, 10_00) )
__magic_name__ = processor_wavaveca(A , return_tensors='''np''' )
__magic_name__ = processor_auto(A , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
__magic_name__ = self._get_dummy_logits()
__magic_name__ = processor_wavaveca.batch_decode(A )
__magic_name__ = processor_auto.batch_decode(A )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = self.get_feature_extractor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = self.get_decoder()
__magic_name__ = WavaVecaProcessorWithLM(tokenizer=A , feature_extractor=A , decoder=A )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def __A ( A , A ) -> List[str]:
'''simple docstring'''
__magic_name__ = [d[key] for d in offsets]
return retrieved_list
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__magic_name__ = self._get_dummy_logits()[0]
__magic_name__ = processor.decode(A , output_word_offsets=A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(A , A ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__magic_name__ = self._get_dummy_logits()
__magic_name__ = processor.batch_decode(A , output_word_offsets=A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(A , A ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(A , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
import torch
__magic_name__ = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=A )
__magic_name__ = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_60_00 ) )
__magic_name__ = iter(A )
__magic_name__ = next(A )
__magic_name__ = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
__magic_name__ = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__magic_name__ = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
__magic_name__ = model(A ).logits.cpu().numpy()
__magic_name__ = processor.decode(logits[0] , output_word_offsets=A )
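        # inputs_to_logits_ratio is the number of raw audio samples that map to one logit frame;
        # dividing by the sampling rate converts frame offsets into seconds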
__magic_name__ = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__magic_name__ = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
__magic_name__ = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(A , '''word''' ) ) , A )
self.assertEqual(''' '''.join(self.get_from_offsets(A , '''word''' ) ) , output.text )
# output times
__magic_name__ = torch.tensor(self.get_from_offsets(A , '''start_time''' ) )
__magic_name__ = torch.tensor(self.get_from_offsets(A , '''end_time''' ) )
# fmt: off
__magic_name__ = torch.tensor([1.41_99, 1.65_99, 2.25_99, 3.0, 3.24, 3.59_99, 3.79_99, 4.09_99, 4.26, 4.94, 5.28, 5.65_99, 5.78, 5.94, 6.32, 6.53_99, 6.65_99] )
__magic_name__ = torch.tensor([1.53_99, 1.89_99, 2.9, 3.16, 3.53_99, 3.72, 4.01_99, 4.17_99, 4.76, 5.15_99, 5.55_99, 5.69_99, 5.86, 6.19_99, 6.38, 6.61_99, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(A , A , atol=0.01 ) )
self.assertTrue(torch.allclose(A , A , atol=0.01 ) ) | 678 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
a_ : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ : List[str] = 256
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = ["""melgan"""]
def __init__( self , A , A , A , A , A , ) -> None:
'''simple docstring'''
super().__init__()
# From MELGAN
__magic_name__ = math.log(1E-5 ) # Matches MelGAN training.
__magic_name__ = 4.0 # Largest value for most examples
__magic_name__ = 1_28
self.register_modules(
notes_encoder=A , continuous_encoder=A , decoder=A , scheduler=A , melgan=A , )
def __A ( self , A , A=(-1.0, 1.0) , A=False ) -> List[Any]:
'''simple docstring'''
__magic_name__ , __magic_name__ = output_range
if clip:
__magic_name__ = torch.clip(A , self.min_value , self.max_value )
# Scale to [0, 1].
__magic_name__ = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def __A ( self , A , A=(-1.0, 1.0) , A=False ) -> Optional[int]:
'''simple docstring'''
__magic_name__ , __magic_name__ = input_range
__magic_name__ = torch.clip(A , A , A ) if clip else outputs
# Scale to [0, 1].
__magic_name__ = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def __A ( self , A , A , A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = input_tokens > 0
__magic_name__ , __magic_name__ = self.notes_encoder(
encoder_input_tokens=A , encoder_inputs_mask=A )
__magic_name__ , __magic_name__ = self.continuous_encoder(
encoder_inputs=A , encoder_inputs_mask=A )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def __A ( self , A , A , A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = noise_time
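        # normalize the noise timestep: accept a Python scalar, a 0-d tensor, or an already-batched tensor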
if not torch.is_tensor(A ):
__magic_name__ = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(A ) and len(timesteps.shape ) == 0:
__magic_name__ = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__magic_name__ = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
__magic_name__ = self.decoder(
encodings_and_masks=A , decoder_input_tokens=A , decoder_noise_time=A )
return logits
@torch.no_grad()
def __call__( self , A , A = None , A = 1_00 , A = True , A = "numpy" , A = None , A = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
'''simple docstring'''
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(A )}.' )
__magic_name__ = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
__magic_name__ = np.zeros([1, 0, self.n_dims] , np.floataa )
__magic_name__ = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=A , device=self.device )
for i, encoder_input_tokens in enumerate(A ):
if i == 0:
__magic_name__ = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
__magic_name__ = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=A , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
__magic_name__ = ones
__magic_name__ = self.scale_features(
A , output_range=[-1.0, 1.0] , clip=A )
__magic_name__ = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=A , continuous_mask=A , )
            # Sample Gaussian noise shaped like encoder_continuous_inputs to begin the denoising loop
__magic_name__ = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=A , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(A )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__magic_name__ = self.decode(
encodings_and_masks=A , input_tokens=A , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
__magic_name__ = self.scheduler.step(A , A , A , generator=A ).prev_sample
__magic_name__ = self.scale_to_features(A , input_range=[-1.0, 1.0] )
__magic_name__ = mel[:1]
__magic_name__ = mel.cpu().float().numpy()
__magic_name__ = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A , A )
logger.info('''Generated segment''' , A )
        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                '''Cannot return output in \'numpy\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                '''Cannot return output in \'numpy\' format if the melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
if output_type == "numpy":
__magic_name__ = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
__magic_name__ = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=A ) | 678 | 1 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@property
def __A ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
@property
def __A ( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , )
return model
@property
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(A )
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = self.dummy_uncond_unet
__magic_name__ = DDIMScheduler()
__magic_name__ = self.dummy_vq_model
__magic_name__ = LDMPipeline(unet=A , vqvae=A , scheduler=A )
ldm.to(A )
ldm.set_progress_bar_config(disable=A )
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = ldm(generator=A , num_inference_steps=2 , output_type='''numpy''' ).images
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = ldm(generator=A , num_inference_steps=2 , output_type='''numpy''' , return_dict=A )[0]
__magic_name__ = image[0, -3:, -3:, -1]
__magic_name__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__magic_name__ = np.array([0.85_12, 0.8_18, 0.64_11, 0.68_08, 0.44_65, 0.56_18, 0.46, 0.62_31, 0.51_72] )
__magic_name__ = 1E-2 if torch_device != '''mps''' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = LDMPipeline.from_pretrained('''CompVis/ldm-celebahq-256''' )
ldm.to(A )
ldm.set_progress_bar_config(disable=A )
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = ldm(generator=A , num_inference_steps=5 , output_type='''numpy''' ).images
__magic_name__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
__magic_name__ = np.array([0.43_99, 0.4_49_75, 0.4_68_25, 0.4_74, 0.43_59, 0.45_81, 0.4_50_95, 0.43_41, 0.44_47] )
__magic_name__ = 1E-2 if torch_device != '''mps''' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance | 678 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
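# Optional-dependency guard: when torch and a recent enough transformers are unavailable,
# fall back to dummy placeholder objects that raise an informative error on use.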
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel | 678 | 1 |
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self ) -> Dict:
'''simple docstring'''
__magic_name__ = []
def __A ( self , A , A , A , **A ) -> Optional[Any]:
'''simple docstring'''
self.events.append('''on_init_end''' )
def __A ( self , A , A , A , **A ) -> int:
'''simple docstring'''
self.events.append('''on_train_begin''' )
def __A ( self , A , A , A , **A ) -> Dict:
'''simple docstring'''
self.events.append('''on_train_end''' )
def __A ( self , A , A , A , **A ) -> Any:
'''simple docstring'''
self.events.append('''on_epoch_begin''' )
def __A ( self , A , A , A , **A ) -> str:
'''simple docstring'''
self.events.append('''on_epoch_end''' )
def __A ( self , A , A , A , **A ) -> int:
'''simple docstring'''
self.events.append('''on_step_begin''' )
def __A ( self , A , A , A , **A ) -> Optional[Any]:
'''simple docstring'''
self.events.append('''on_step_end''' )
def __A ( self , A , A , A , **A ) -> Any:
'''simple docstring'''
self.events.append('''on_evaluate''' )
def __A ( self , A , A , A , **A ) -> Tuple:
'''simple docstring'''
self.events.append('''on_predict''' )
def __A ( self , A , A , A , **A ) -> List[str]:
'''simple docstring'''
self.events.append('''on_save''' )
def __A ( self , A , A , A , **A ) -> Optional[Any]:
'''simple docstring'''
self.events.append('''on_log''' )
def __A ( self , A , A , A , **A ) -> int:
'''simple docstring'''
self.events.append('''on_prediction_step''' )
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = tempfile.mkdtemp()
def __A ( self ) -> Dict:
'''simple docstring'''
shutil.rmtree(self.output_dir )
def __A ( self , A=0 , A=0 , A=64 , A=64 , A=None , A=False , **A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = RegressionDataset(length=A )
__magic_name__ = RegressionDataset(length=A )
__magic_name__ = RegressionModelConfig(a=A , b=A )
__magic_name__ = RegressionPreTrainedModel(A )
__magic_name__ = TrainingArguments(self.output_dir , disable_tqdm=A , report_to=[] , **A )
return Trainer(
A , A , train_dataset=A , eval_dataset=A , callbacks=A , )
def __A ( self , A , A ) -> List[Any]:
'''simple docstring'''
self.assertEqual(len(A ) , len(A ) )
# Order doesn't matter
__magic_name__ = sorted(A , key=lambda A : cb.__name__ if isinstance(A , A ) else cb.__class__.__name__ )
__magic_name__ = sorted(A , key=lambda A : cb.__name__ if isinstance(A , A ) else cb.__class__.__name__ )
for cba, cba in zip(A , A ):
if isinstance(A , A ) and isinstance(A , A ):
self.assertEqual(A , A )
elif isinstance(A , A ) and not isinstance(A , A ):
self.assertEqual(A , cba.__class__ )
elif not isinstance(A , A ) and isinstance(A , A ):
self.assertEqual(cba.__class__ , A )
else:
self.assertEqual(A , A )
def __A ( self , A ) -> Dict:
'''simple docstring'''
__magic_name__ = ['''on_init_end''', '''on_train_begin''']
__magic_name__ = 0
__magic_name__ = len(trainer.get_eval_dataloader() )
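        # each evaluation pass emits one on_prediction_step event per batch, followed by on_log and on_evaluate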
__magic_name__ = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate''']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('''on_epoch_begin''' )
for _ in range(A ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('''on_log''' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('''on_save''' )
expected_events.append('''on_epoch_end''' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = self.get_trainer()
__magic_name__ = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
# Callbacks passed at init are added to the default callbacks
__magic_name__ = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
        # TrainingArguments.disable_tqdm controls whether ProgressCallback or PrinterCallback is used
__magic_name__ = self.get_trainer(disable_tqdm=A )
__magic_name__ = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
__magic_name__ = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(A )
expected_callbacks.remove(A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
__magic_name__ = self.get_trainer()
__magic_name__ = trainer.pop_callback(A )
self.assertEqual(cb.__class__ , A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
trainer.add_callback(A )
expected_callbacks.insert(0 , A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
# We can also add, pop, or remove by instance
__magic_name__ = self.get_trainer()
__magic_name__ = trainer.callback_handler.callbacks[0]
trainer.remove_callback(A )
expected_callbacks.remove(A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
__magic_name__ = self.get_trainer()
__magic_name__ = trainer.callback_handler.callbacks[0]
__magic_name__ = trainer.pop_callback(A )
self.assertEqual(A , A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
trainer.add_callback(A )
expected_callbacks.insert(0 , A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
def __A ( self ) -> List[Any]:
'''simple docstring'''
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action='''ignore''' , category=A )
__magic_name__ = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
__magic_name__ = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A , self.get_expected_events(A ) )
# Independent log/save/eval
__magic_name__ = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
__magic_name__ = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A , self.get_expected_events(A ) )
__magic_name__ = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
__magic_name__ = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A , self.get_expected_events(A ) )
__magic_name__ = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='''steps''' )
trainer.train()
__magic_name__ = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A , self.get_expected_events(A ) )
__magic_name__ = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='''epoch''' )
trainer.train()
__magic_name__ = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A , self.get_expected_events(A ) )
# A bit of everything
__magic_name__ = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy='''steps''' , )
trainer.train()
__magic_name__ = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A , self.get_expected_events(A ) )
# warning should be emitted for duplicated callbacks
with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock:
__magic_name__ = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(A ) in warn_mock.call_args[0][0] | 678 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
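    # SimMIM pre-training runs at a fixed 192x192 resolution (see the img192 checkpoint names), hence the hard-coded image size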
__magic_name__ = SwinConfig(image_size=192 )
if "base" in model_name:
__magic_name__ = 6
__magic_name__ = 128
__magic_name__ = (2, 2, 18, 2)
__magic_name__ = (4, 8, 16, 32)
elif "large" in model_name:
__magic_name__ = 12
__magic_name__ = 192
__magic_name__ = (2, 2, 18, 2)
__magic_name__ = (6, 12, 24, 48)
else:
raise ValueError('''Model not supported, only supports base and large variants''' )
__magic_name__ = window_size
__magic_name__ = embed_dim
__magic_name__ = depths
__magic_name__ = num_heads
return config
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
if "encoder.mask_token" in name:
__magic_name__ = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
if "encoder.patch_embed.proj" in name:
__magic_name__ = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "encoder.patch_embed.norm" in name:
__magic_name__ = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
if "attn.proj" in name:
__magic_name__ = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
__magic_name__ = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
__magic_name__ = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
__magic_name__ = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
__magic_name__ = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
__magic_name__ = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
__magic_name__ = '''layernorm.weight'''
if name == "encoder.norm.bias":
__magic_name__ = '''layernorm.bias'''
if "decoder" in name:
pass
else:
__magic_name__ = '''swin.''' + name
return name
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Any ):
for key in orig_state_dict.copy().keys():
__magic_name__ = orig_state_dict.pop(snake_case_ )
if "attn_mask" in key:
pass
elif "qkv" in key:
__magic_name__ = key.split('''.''' )
__magic_name__ = int(key_split[2] )
__magic_name__ = int(key_split[4] )
__magic_name__ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
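            # the fused qkv projection stacks query, key and value along dim 0, so each takes an equal third of the rows (or entries, for the bias)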
if "weight" in key:
                __magic_name__ = val[:dim, :]
                __magic_name__ = val[dim : dim * 2, :]
                __magic_name__ = val[-dim:, :]
            else:
                __magic_name__ = val[:dim]
                __magic_name__ = val[dim : dim * 2]
                __magic_name__ = val[-dim:]
else:
__magic_name__ = val
return orig_state_dict
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : int , snake_case_ : Any , snake_case_ : str ):
__magic_name__ = torch.load(snake_case_ , map_location='''cpu''' )['''model''']
__magic_name__ = get_swin_config(snake_case_ )
__magic_name__ = SwinForMaskedImageModeling(snake_case_ )
model.eval()
__magic_name__ = convert_state_dict(snake_case_ , snake_case_ )
model.load_state_dict(snake_case_ )
__magic_name__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__magic_name__ = ViTImageProcessor(size={'''height''': 192, '''width''': 192} )
__magic_name__ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw )
__magic_name__ = image_processor(images=snake_case_ , return_tensors='''pt''' )
with torch.no_grad():
__magic_name__ = model(**snake_case_ ).logits
print(outputs.keys() )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case_ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(snake_case_ )
if push_to_hub:
print(f'Pushing model and image processor for {model_name} to hub' )
model.push_to_hub(f'microsoft/{model_name}' )
image_processor.push_to_hub(f'microsoft/{model_name}' )
if __name__ == "__main__":
a_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
a_ : Optional[Any] = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub) | 678 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
a_ : Tuple = None
a_ : List[Any] = logging.get_logger(__name__)
a_ : int = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
a_ : List[str] = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
a_ : Optional[int] = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
a_ : Any = '▁'
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = BigBirdTokenizer
_a = ["""input_ids""", """attention_mask"""]
_a = []
def __init__( self , A=None , A=None , A="<unk>" , A="<s>" , A="</s>" , A="<pad>" , A="[SEP]" , A="[MASK]" , A="[CLS]" , **A , ) -> int:
'''simple docstring'''
__magic_name__ = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else bos_token
__magic_name__ = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else eos_token
__magic_name__ = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else unk_token
__magic_name__ = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else pad_token
__magic_name__ = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else cls_token
__magic_name__ = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else sep_token
        # Mask token behaves like a normal word, i.e. includes the space before it
__magic_name__ = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
super().__init__(
A , tokenizer_file=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , **A , )
__magic_name__ = vocab_file
__magic_name__ = False if not self.vocab_file else True
def __A ( self , A , A = None ) -> List[int]:
'''simple docstring'''
__magic_name__ = [self.sep_token_id]
__magic_name__ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __A ( self , A , A = None , A = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1] + ([0] * len(A )) + [1]
def __A ( self , A , A = None ) -> List[int]:
'''simple docstring'''
__magic_name__ = [self.sep_token_id]
__magic_name__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self , A , A = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(A ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__magic_name__ = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ):
copyfile(self.vocab_file , A )
return (out_vocab_file,) | 678 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
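# Two words are anagrams exactly when their sorted-letter signatures match, so grouping by signature collects each anagram class.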
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
return "".join(sorted(snake_case_ ) )
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
return word_by_signature[signature(snake_case_ )]
a_ : str = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
a_ : Optional[Any] = sorted({word.strip().lower() for word in data.splitlines()})
a_ : List[Any] = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
a_ : Optional[Any] = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams)) | 678 | 1 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , A , A=13 , A=30 , A=2 , A=3 , A=True , A=True , A=32 , A=2 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=10 , A=0.02 , A=3 , A=None , A=2 , ) -> Tuple:
'''simple docstring'''
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = image_size
__magic_name__ = patch_size
__magic_name__ = num_channels
__magic_name__ = is_training
__magic_name__ = use_labels
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = type_sequence_label_size
__magic_name__ = initializer_range
__magic_name__ = scope
__magic_name__ = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__magic_name__ = (image_size // patch_size) ** 2
__magic_name__ = num_patches + 2
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ = None
if self.use_labels:
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = self.get_config()
return config, pixel_values, labels
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __A ( self , A , A , A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = TFDeiTModel(config=A )
__magic_name__ = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , A , A , A ) -> str:
'''simple docstring'''
__magic_name__ = TFDeiTForMaskedImageModeling(config=A )
__magic_name__ = model(A )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__magic_name__ = 1
__magic_name__ = TFDeiTForMaskedImageModeling(A )
__magic_name__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ = model(A )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __A ( self , A , A , A ) -> Tuple:
'''simple docstring'''
__magic_name__ = self.type_sequence_label_size
__magic_name__ = TFDeiTForImageClassification(A )
__magic_name__ = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__magic_name__ = 1
__magic_name__ = TFDeiTForImageClassification(A )
__magic_name__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = self.prepare_config_and_inputs()
__magic_name__ , __magic_name__ , __magic_name__ = config_and_inputs
__magic_name__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_a = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
_a = (
{
"""feature-extraction""": TFDeiTModel,
"""image-classification""": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
_a = False
_a = False
_a = False
_a = False
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = TFDeiTModelTester(self )
__magic_name__ = ConfigTester(self , config_class=A , has_text_modality=A , hidden_size=37 )
def __A ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''' )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
pass
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ = model_class(A )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
__magic_name__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A , tf.keras.layers.Dense ) )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ = model_class(A )
__magic_name__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ = [*signature.parameters.keys()]
__magic_name__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A )
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
def __A ( self , A , A , A=False ) -> Dict:
'''simple docstring'''
__magic_name__ = super()._prepare_for_class(A , A , return_labels=A )
if return_labels:
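            # some classes (e.g. the with-teacher variant) take no labels argument, so drop labels when the model's call signature lacks them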
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def __A ( self ) -> Tuple:
'''simple docstring'''
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ = TFDeiTModel.from_pretrained(A )
self.assertIsNotNone(A )
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __A ( self ) -> Dict:
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
if is_vision_available()
else None
)
@slow
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
__magic_name__ = self.default_image_processor
__magic_name__ = prepare_img()
__magic_name__ = image_processor(images=A , return_tensors='''tf''' )
# forward pass
__magic_name__ = model(**A )
# verify the logits
__magic_name__ = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , A )
__magic_name__ = tf.constant([-1.02_66, 0.19_12, -1.28_61] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , A , atol=1E-4 ) ) | 678 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , A ) -> Tuple:
'''simple docstring'''
__magic_name__ = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
__magic_name__ = len(A ) - 1
def __A ( self , A ) -> list[float]:
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__magic_name__ = []
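        # Bernstein basis: b_{i,n}(t) = C(n, i) * (1 - t)^(n - i) * t^i, one term per control point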
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , A ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(A ) , 5 ) == 1
return output_values
def __A ( self , A ) -> tuple[float, float]:
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__magic_name__ = self.basis_function(A )
__magic_name__ = 0.0
__magic_name__ = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def __A ( self , A = 0.01 ) -> Tuple:
'''simple docstring'''
from matplotlib import pyplot as plt # type: ignore
__magic_name__ = [] # x coordinates of points to plot
__magic_name__ = [] # y coordinates of points to plot
__magic_name__ = 0.0
while t <= 1:
__magic_name__ = self.bezier_curve_function(A )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
__magic_name__ = [i[0] for i in self.list_of_points]
__magic_name__ = [i[1] for i in self.list_of_points]
plt.plot(
A , A , color='''blue''' , label='''Curve of Degree ''' + str(self.degree ) , )
plt.scatter(A , A , color='''red''' , label='''Control Points''' )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3 | 678 | 1 |
import re
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
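    # accepted shape: an optional country prefix (0, 94, +94 or 0094), a mobile prefix 7x with x in {0,1,2,4,5,6,7,8},
    # an optional '-' or ' ' separator, then exactly seven digits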
__magic_name__ = re.compile(
r'''^(?:0|94|\+94|0{2}94)''' r'''7(0|1|2|4|5|6|7|8)''' r'''(-| |)''' r'''\d{7}$''' )
return bool(re.search(snake_case_ , snake_case_ ) )
if __name__ == "__main__":
a_ : Optional[int] = '0094702343221'
print(is_sri_lankan_phone_number(phone)) | 678 |
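# A few illustrative inputs (hypothetical numbers, for quick manual checks):
# '+94773283048' -> True, '0718382399' -> True, '075469322' -> False (one digit short)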
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
a_ : Tuple = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
a_ : List[str] = typing.Union[np.floataa, int, float] # noqa: UP007
def _SCREAMING_SNAKE_CASE ( snake_case_ : Vector , snake_case_ : Vector ):
return np.sqrt(np.sum((np.asarray(snake_case_ ) - np.asarray(snake_case_ )) ** 2 ) )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Vector , snake_case_ : Vector ):
return sum((va - va) ** 2 for va, va in zip(snake_case_ , snake_case_ ) ) ** (1 / 2)
if __name__ == "__main__":
def _SCREAMING_SNAKE_CASE ( ):
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=1_0000 , globals=globals() , ) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=1_0000 , globals=globals() , ) )
benchmark() | 678 |
import os
import sys
import unittest
a_ : int = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
a_ : Optional[Any] = os.path.join(git_repo_path, 'src', 'diffusers')
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = find_backend(''' if not is_torch_available():''' )
self.assertEqual(A , '''torch''' )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
__magic_name__ = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' )
self.assertEqual(A , '''torch_and_transformers''' )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
__magic_name__ = find_backend(
''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' )
self.assertEqual(A , '''torch_and_transformers_and_onnx''' )
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn('''torch''' , A )
self.assertIn('''torch_and_transformers''' , A )
self.assertIn('''flax_and_transformers''' , A )
self.assertIn('''torch_and_transformers_and_onnx''' , A )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''UNet2DModel''' , objects['''torch'''] )
self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] )
self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] )
self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] )
self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] )
self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(A , '''\nCONSTANT = None\n''' )
__magic_name__ = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
A , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
__magic_name__ = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
'''
__magic_name__ = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(A , A )
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
'''
__magic_name__ = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , A ) | 678 | 1 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=False , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=5_12 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> List[Any]:
'''simple docstring'''
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = seq_length
__magic_name__ = is_training
__magic_name__ = use_input_mask
__magic_name__ = use_token_type_ids
__magic_name__ = use_labels
__magic_name__ = vocab_size
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = type_vocab_size
__magic_name__ = type_sequence_label_size
__magic_name__ = initializer_range
__magic_name__ = num_labels
__magic_name__ = num_choices
__magic_name__ = scope
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ = None
if self.use_input_mask:
__magic_name__ = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ = None
if self.use_token_type_ids:
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
if self.use_labels:
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self ) -> Dict:
'''simple docstring'''
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , )
def __A ( self , A , A , A , A , A , A , A ) -> int:
'''simple docstring'''
__magic_name__ = BioGptModel(config=A )
model.to(A )
model.eval()
__magic_name__ = model(A , attention_mask=A )
__magic_name__ = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , A , A , A , A , A , A , A , A , A , ) -> List[Any]:
'''simple docstring'''
__magic_name__ = BioGptForCausalLM(config=A )
model.to(A )
model.eval()
__magic_name__ = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A , A , A , A , A , *A ) -> Tuple:
'''simple docstring'''
__magic_name__ = BioGptModel(config=A )
model.to(A )
model.eval()
# create attention mask
__magic_name__ = torch.ones(input_ids.shape , dtype=torch.long , device=A )
__magic_name__ = self.seq_length // 2
__magic_name__ = 0
# first forward pass
__magic_name__ , __magic_name__ = model(A , attention_mask=A ).to_tuple()
        # create a hypothetical next token and extend next_input_ids with it
__magic_name__ = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
__magic_name__ = ids_tensor((1,) , A ).item() + 1
__magic_name__ = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
__magic_name__ = random_other_next_tokens
# append to next input_ids and attn_mask
__magic_name__ = torch.cat([input_ids, next_tokens] , dim=-1 )
__magic_name__ = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=A )] , dim=1 , )
# get two different outputs
__magic_name__ = model(A , attention_mask=A )['''last_hidden_state''']
__magic_name__ = model(A , past_key_values=A , attention_mask=A )['''last_hidden_state''']
# select random slice
__magic_name__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__magic_name__ = output_from_no_past[:, -1, random_slice_idx].detach()
__magic_name__ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1E-3 ) )
def __A ( self , A , A , A , A , A , *A ) -> Any:
'''simple docstring'''
__magic_name__ = BioGptModel(config=A ).to(A ).eval()
__magic_name__ = torch.ones(input_ids.shape , dtype=torch.long , device=A )
# first forward pass
__magic_name__ = model(A , attention_mask=A , use_cache=A )
__magic_name__ , __magic_name__ = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend next_input_ids with them
__magic_name__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
__magic_name__ = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and attn_mask
__magic_name__ = torch.cat([input_ids, next_tokens] , dim=-1 )
__magic_name__ = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
__magic_name__ = model(A , attention_mask=A )['''last_hidden_state''']
__magic_name__ = model(A , attention_mask=A , past_key_values=A )[
'''last_hidden_state'''
]
# select random slice
__magic_name__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__magic_name__ = output_from_no_past[:, -3:, random_slice_idx].detach()
__magic_name__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1E-3 ) )
def __A ( self , A , A , A , A , A , *A , A=False ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = BioGptForCausalLM(A )
model.to(A )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
__magic_name__ = model(A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def __A ( self , A , *A ) -> Tuple:
'''simple docstring'''
__magic_name__ = BioGptModel(A )
__magic_name__ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_01 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def __A ( self , A , A , A , A , A , *A ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = self.num_labels
__magic_name__ = BioGptForTokenClassification(A )
model.to(A )
model.eval()
__magic_name__ = model(A , attention_mask=A , token_type_ids=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_a = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
_a = (BioGptForCausalLM,) if is_torch_available() else ()
_a = (
{
"""feature-extraction""": BioGptModel,
"""text-classification""": BioGptForSequenceClassification,
"""text-generation""": BioGptForCausalLM,
"""token-classification""": BioGptForTokenClassification,
"""zero-shot""": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
_a = False
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = BioGptModelTester(self )
__magic_name__ = ConfigTester(self , config_class=A , hidden_size=37 )
def __A ( self ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__magic_name__ = type
self.model_tester.create_and_check_model(*A )
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*A )
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*A , gradient_checkpointing=A )
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*A )
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*A )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*A )
@slow
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(A )
__magic_name__ = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
__magic_name__ = '''left'''
# Define PAD Token = EOS Token = 50256
__magic_name__ = tokenizer.eos_token
__magic_name__ = model.config.eos_token_id
# use different length sentences to test batching
__magic_name__ = [
'''Hello, my dog is a little''',
'''Today, I''',
]
__magic_name__ = tokenizer(A , return_tensors='''pt''' , padding=A )
__magic_name__ = inputs['''input_ids'''].to(A )
__magic_name__ = model.generate(
input_ids=A , attention_mask=inputs['''attention_mask'''].to(A ) , )
__magic_name__ = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(A )
__magic_name__ = model.generate(input_ids=A )
__magic_name__ = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
__magic_name__ = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(A )
__magic_name__ = model.generate(input_ids=A , max_length=model.config.max_length - num_paddings )
__magic_name__ = tokenizer.batch_decode(A , skip_special_tokens=A )
__magic_name__ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=A )
__magic_name__ = tokenizer.decode(output_padded[0] , skip_special_tokens=A )
__magic_name__ = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(A , A )
self.assertListEqual(A , [non_padded_sentence, padded_sentence] )
@slow
def __A ( self ) -> List[str]:
'''simple docstring'''
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ = BioGptModel.from_pretrained(A )
self.assertIsNotNone(A )
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = 3
__magic_name__ = input_dict['''input_ids''']
__magic_name__ = input_ids.ne(1 ).to(A )
__magic_name__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__magic_name__ = BioGptForSequenceClassification(A )
model.to(A )
model.eval()
__magic_name__ = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = 3
__magic_name__ = '''multi_label_classification'''
__magic_name__ = input_dict['''input_ids''']
__magic_name__ = input_ids.ne(1 ).to(A )
__magic_name__ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__magic_name__ = BioGptForSequenceClassification(A )
model.to(A )
model.eval()
__magic_name__ = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
__magic_name__ = torch.tensor([[2, 48_05, 9, 6_56, 21]] )
__magic_name__ = model(A )[0]
__magic_name__ = 4_23_84
__magic_name__ = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , A )
__magic_name__ = torch.tensor(
[[[-9.52_36, -9.89_18, 10.45_57], [-11.04_69, -9.64_23, 8.10_22], [-8.86_64, -7.88_26, 5.53_25]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , A , atol=1E-4 ) )
@slow
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
__magic_name__ = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(A )
torch.manual_seed(0 )
__magic_name__ = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(A )
__magic_name__ = model.generate(
**A , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=A , )
__magic_name__ = tokenizer.decode(output_ids[0] , skip_special_tokens=A )
__magic_name__ = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(A , A ) | 678 |
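# Usage sketch outside the test suite (illustrative; "microsoft/biogpt" is the same
# public checkpoint the integration tests above load):
#
#   from transformers import BioGptForCausalLM, BioGptTokenizer
#   tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
#   model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
#   inputs = tokenizer("COVID-19 is", return_tensors="pt")
#   output_ids = model.generate(**inputs, max_length=20)
#   print(tokenizer.decode(output_ids[0], skip_special_tokens=True))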
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col))
    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)
    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 678 | 1 |
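# Usage sketch (illustrative, not part of the original module): count the unique
# simple paths from the top-left to the bottom-right corner of a small grid,
# where 1 marks a blocked cell.
if __name__ == "__main__":
    example_grid = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
    print(depth_first_search(example_grid, 0, 0, set()))  # 2: one path around each side of the obstacle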
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()
        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
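# Usage sketch (illustrative; assumes the public `datasets` API): a formatter like
# the one above is what backs `Dataset.with_format("torch")`, e.g.
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("torch")
#   print(ds[0]["x"])  # tensor([1., 2.])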
UNIT_SYMBOL = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 12,
'Pm': 15,
'Em': 18,
'Zm': 21,
'Ym': 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod() | 678 | 1 |
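# Example (illustrative): converting 4 kilometres to metres multiplies by
# 10 ** (3 - 0).
if __name__ == "__main__":
    print(length_conversion(4, "kilometer", "meter"))  # 4000.0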
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj into separate q/k/v projections
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )
    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)
    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)
    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)
    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)
    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)
    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint',
default='small',
type=str,
help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
)
parser.add_argument(
'--pytorch_dump_folder',
required=True,
default=None,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
parser.add_argument(
'--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub) | 678 |
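# Example invocation (illustrative; the script filename below is an assumption):
#
#   python convert_musicgen_transformers.py \
#       --checkpoint small --pytorch_dump_folder ./musicgen-small --device cpu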
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : int = [
'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongT5EncoderModel',
'LongT5ForConditionalGeneration',
'LongT5Model',
'LongT5PreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Dict = [
'FlaxLongT5ForConditionalGeneration',
'FlaxLongT5Model',
'FlaxLongT5PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
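# Note on the lazy-module pattern above (illustrative): importing the package stays
# cheap because the heavy submodules listed in _import_structure are only imported
# on first attribute access, e.g.
#
#   from transformers import LongT5Config  # resolves configuration_longt5 lazily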
def gray_code(bit_count: int) -> list:
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    # convert the binary strings to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)
    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)
    sequence = []
    # prefix "0" to the first half of the smaller sequence
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])
    # prefix "1" to the second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod() | 678 |
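# Example (illustrative): successive 2-bit Gray codes differ in exactly one bit.
if __name__ == "__main__":
    print(gray_code(2))  # [0, 1, 3, 2]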
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=5_12 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> str:
'''simple docstring'''
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = seq_length
__magic_name__ = is_training
__magic_name__ = use_token_type_ids
__magic_name__ = use_labels
__magic_name__ = vocab_size
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = type_vocab_size
__magic_name__ = type_sequence_label_size
__magic_name__ = initializer_range
__magic_name__ = num_labels
__magic_name__ = num_choices
__magic_name__ = scope
__magic_name__ = self.vocab_size - 1
def __A ( self ) -> str:
'''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __A ( self , A , A , A , A , *A ) -> Tuple:
'''simple docstring'''
__magic_name__ = OpenAIGPTModel(config=A )
model.to(A )
model.eval()
__magic_name__ = model(A , token_type_ids=A , head_mask=A )
__magic_name__ = model(A , token_type_ids=A )
__magic_name__ = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , A , A , A , A , *A ) -> Dict:
'''simple docstring'''
__magic_name__ = OpenAIGPTLMHeadModel(A )
model.to(A )
model.eval()
__magic_name__ = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A , A , A , A , *A ) -> List[Any]:
'''simple docstring'''
__magic_name__ = OpenAIGPTDoubleHeadsModel(A )
model.to(A )
model.eval()
__magic_name__ = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A , A , A , A , *A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.num_labels
__magic_name__ = OpenAIGPTForSequenceClassification(A )
model.to(A )
model.eval()
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ) -> Dict:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_a = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
_a = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
_a = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def __A ( self , A , A , A , A , A ) -> List[str]:
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def __A ( self , A , A , A=False ) -> List[str]:
'''simple docstring'''
__magic_name__ = super()._prepare_for_class(A , A , return_labels=A )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
__magic_name__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=A , )
__magic_name__ = inputs_dict['''labels''']
__magic_name__ = inputs_dict['''labels''']
__magic_name__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=A , )
__magic_name__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A )
return inputs_dict
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = OpenAIGPTModelTester(self )
__magic_name__ = ConfigTester(self , config_class=A , n_embd=37 )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*A )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*A )
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*A )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*A )
@slow
def __A ( self ) -> List[str]:
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ = OpenAIGPTModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(A )
__magic_name__ = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=A ) # the president is
__magic_name__ = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
__magic_name__ = model.generate(A , do_sample=A )
self.assertListEqual(output_ids[0].tolist() , A ) | 678 | 1 |
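# Usage sketch outside the test suite (illustrative; "openai-gpt" is the same public
# checkpoint the integration test above loads):
#
#   from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
#   tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
#   model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
#   input_ids = tokenizer("the president is", return_tensors="pt").input_ids
#   print(tokenizer.decode(model.generate(input_ids, do_sample=False)[0]))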
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
    _torch_available = True
except ImportError:
    _torch_available = False
try:
from torch.hub import _get_torch_home
    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
    )
default_cache_path = os.path.join(torch_cache_home, 'transformers')
CLOUDFRONT_DISTRIB_PREFIX = 'https://cdn.huggingface.co'
S3_BUCKET_PREFIX = 'https://s3.amazonaws.com/models.huggingface.co/bert'
PATH = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
CONFIG = os.path.join(PATH, 'config.yaml')
ATTRIBUTES = os.path.join(PATH, 'attributes.txt')
OBJECTS = os.path.join(PATH, 'objects.txt')
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = 'pytorch_model.bin'
CONFIG_NAME = 'config.yaml'
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())
    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = {}
def __init__( self , A , A = "root" , A=0 ) -> List[str]:
'''simple docstring'''
__magic_name__ = name
__magic_name__ = level
__magic_name__ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__magic_name__ = copy.deepcopy(A )
__magic_name__ = copy.deepcopy(A )
if isinstance(A , A ):
__magic_name__ = Config(A , name=A , level=level + 1 )
__magic_name__ = v
setattr(self , A , A )
__magic_name__ = d
def __repr__( self ) -> Union[str, Any]:
'''simple docstring'''
return str(list((self._pointer.keys()) ) )
def __setattr__( self , A , A ) -> Tuple:
'''simple docstring'''
__magic_name__ = val
__magic_name__ = val
__magic_name__ = key.split('''.''' )
__magic_name__ = len(A ) - 1
__magic_name__ = self._pointer
if len(A ) > 1:
for i, l in enumerate(A ):
if hasattr(self , A ) and isinstance(getattr(self , A ) , A ):
setattr(getattr(self , A ) , '''.'''.join(levels[i:] ) , A )
if l == last_level:
__magic_name__ = val
else:
__magic_name__ = pointer[l]
def __A ( self ) -> List[Any]:
'''simple docstring'''
return self._pointer
def __A ( self , A , A ) -> Any:
'''simple docstring'''
with open(F'{file_name}' , '''w''' ) as stream:
dump(A , A )
def __A ( self , A , A ) -> List[Any]:
'''simple docstring'''
with open(F'{file_name}' , '''w''' ) as stream:
json.dump(A , A )
@staticmethod
def __A ( A ) -> Optional[Any]:
'''simple docstring'''
with open(A ) as stream:
__magic_name__ = load(A , Loader=A )
return data
def __str__( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = ''' '''
if self._name != "root":
__magic_name__ = F'{t * (self._level-1)}{self._name}:\n'
else:
__magic_name__ = ''''''
__magic_name__ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(A , A ):
r += F'{t * (self._level)}{v}\n'
self._level += 1
else:
r += F'{t * (self._level)}{k}: {v} ({type(A ).__name__})\n'
__magic_name__ = level
return r[:-1]
@classmethod
def __A ( cls , A , **A ) -> int:
'''simple docstring'''
__magic_name__ , __magic_name__ = cls.get_config_dict(A , **A )
return cls(A )
@classmethod
def __A ( cls , A , **A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = kwargs.pop('''cache_dir''' , A )
__magic_name__ = kwargs.pop('''force_download''' , A )
__magic_name__ = kwargs.pop('''resume_download''' , A )
__magic_name__ = kwargs.pop('''proxies''' , A )
__magic_name__ = kwargs.pop('''local_files_only''' , A )
if os.path.isdir(A ):
__magic_name__ = os.path.join(A , A )
elif os.path.isfile(A ) or is_remote_url(A ):
__magic_name__ = pretrained_model_name_or_path
else:
__magic_name__ = hf_bucket_url(A , filename=A , use_cdn=A )
try:
# Load from URL or cache if already cached
__magic_name__ = cached_path(
A , cache_dir=A , force_download=A , proxies=A , resume_download=A , local_files_only=A , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__magic_name__ = Config.load_yaml(A )
except EnvironmentError:
__magic_name__ = '''Can\'t load config for'''
raise EnvironmentError(A )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(A ), kwargs
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple ):
__magic_name__ = torch.load('''dump.pt''' , map_location=in_tensor.device )
__magic_name__ = in_tensor.numpy()
__magic_name__ = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ), (
f'{sum([1 for x in np.isclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %'
" element-wise mismatch"
)
raise Exception('''tensors are all good''' )
# Hugging face functions below
def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id, filename, use_cdn=True):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple , snake_case_ : List[str]=None , snake_case_ : Dict=0 , snake_case_ : Tuple=None , ):
__magic_name__ = '''python/{}'''.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(snake_case_ , snake_case_ ):
ua += "; " + "; ".join('''{}/{}'''.format(snake_case_ , snake_case_ ) for k, v in user_agent.items() )
elif isinstance(snake_case_ , snake_case_ ):
ua += "; " + user_agent
__magic_name__ = {'''user-agent''': ua}
if resume_size > 0:
__magic_name__ = '''bytes=%d-''' % (resume_size,)
__magic_name__ = requests.get(snake_case_ , stream=snake_case_ , proxies=snake_case_ , headers=snake_case_ )
if response.status_code == 416: # Range not satisfiable
return
__magic_name__ = response.headers.get('''Content-Length''' )
__magic_name__ = resume_size + int(snake_case_ ) if content_length is not None else None
__magic_name__ = tqdm(
unit='''B''' , unit_scale=snake_case_ , total=snake_case_ , initial=snake_case_ , desc='''Downloading''' , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(snake_case_ ) )
temp_file.write(snake_case_ )
progress.close()
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Dict=None , snake_case_ : int=False , snake_case_ : List[Any]=None , snake_case_ : Tuple=10 , snake_case_ : int=False , snake_case_ : Any=None , snake_case_ : Tuple=False , ):
if cache_dir is None:
__magic_name__ = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
__magic_name__ = None
if not local_files_only:
try:
__magic_name__ = requests.head(snake_case_ , allow_redirects=snake_case_ , proxies=snake_case_ , timeout=snake_case_ )
if response.status_code == 200:
__magic_name__ = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__magic_name__ = url_to_filename(snake_case_ , snake_case_ )
# get cache path to put the file
__magic_name__ = os.path.join(snake_case_ , snake_case_ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(snake_case_ ):
return cache_path
else:
__magic_name__ = [
file
for file in fnmatch.filter(os.listdir(snake_case_ ) , filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(snake_case_ ) > 0:
return os.path.join(snake_case_ , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(snake_case_ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__magic_name__ = cache_path + '''.lock'''
with FileLock(snake_case_ ):
# If the download just completed while the lock was activated.
if os.path.exists(snake_case_ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__magic_name__ = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(snake_case_ , '''a+b''' ) as f:
yield f
__magic_name__ = _resumable_file_manager
if os.path.exists(snake_case_ ):
__magic_name__ = os.stat(snake_case_ ).st_size
else:
__magic_name__ = 0
else:
__magic_name__ = partial(tempfile.NamedTemporaryFile , dir=snake_case_ , delete=snake_case_ )
__magic_name__ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'''%s not found in cache or force_download set to True, downloading to %s''' , snake_case_ , temp_file.name , )
http_get(
snake_case_ , snake_case_ , proxies=snake_case_ , resume_size=snake_case_ , user_agent=snake_case_ , )
os.replace(temp_file.name , snake_case_ )
__magic_name__ = {'''url''': url, '''etag''': etag}
__magic_name__ = cache_path + '''.json'''
with open(snake_case_ , '''w''' ) as meta_file:
json.dump(snake_case_ , snake_case_ )
return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str=None , snake_case_ : Tuple=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[Any]=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[str]=False , snake_case_ : Optional[int]=False , snake_case_ : Optional[int]=False , ):
if cache_dir is None:
__magic_name__ = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
if is_remote_url(snake_case_ ):
# URL, so get it from the cache (downloading if necessary)
__magic_name__ = get_from_cache(
snake_case_ , cache_dir=snake_case_ , force_download=snake_case_ , proxies=snake_case_ , resume_download=snake_case_ , user_agent=snake_case_ , local_files_only=snake_case_ , )
elif os.path.exists(snake_case_ ):
# File, and it exists.
__magic_name__ = url_or_filename
elif urlparse(snake_case_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(snake_case_ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(snake_case_ ) )
if extract_compressed_file:
if not is_zipfile(snake_case_ ) and not tarfile.is_tarfile(snake_case_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__magic_name__ , __magic_name__ = os.path.split(snake_case_ )
__magic_name__ = output_file.replace('''.''' , '''-''' ) + '''-extracted'''
__magic_name__ = os.path.join(snake_case_ , snake_case_ )
if os.path.isdir(snake_case_ ) and os.listdir(snake_case_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__magic_name__ = output_path + '''.lock'''
with FileLock(snake_case_ ):
shutil.rmtree(snake_case_ , ignore_errors=snake_case_ )
os.makedirs(snake_case_ )
if is_zipfile(snake_case_ ):
with ZipFile(snake_case_ , '''r''' ) as zip_file:
zip_file.extractall(snake_case_ )
zip_file.close()
elif tarfile.is_tarfile(snake_case_ ):
__magic_name__ = tarfile.open(snake_case_ )
tar_file.extractall(snake_case_ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(snake_case_ ) )
return output_path_extracted
return output_path
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : int="," ):
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
with open(snake_case_ ) as f:
__magic_name__ = eval(f.read() )
else:
__magic_name__ = requests.get(snake_case_ )
try:
__magic_name__ = requests.json()
except Exception:
__magic_name__ = req.content.decode()
assert data is not None, "could not connect"
try:
__magic_name__ = eval(snake_case_ )
except Exception:
__magic_name__ = data.split('''\n''' )
req.close()
return data
def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] ):
__magic_name__ = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(snake_case_ )
with open(snake_case_ , '''rb''' ) as stream:
__magic_name__ = pkl.load(snake_case_ )
__magic_name__ = weights.pop('''model''' )
__magic_name__ = {}
for k, v in model.items():
__magic_name__ = torch.from_numpy(snake_case_ )
if "running_var" in k:
__magic_name__ = torch.tensor([0] )
__magic_name__ = k.replace('''running_var''' , '''num_batches_tracked''' )
__magic_name__ = zero
return new
def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")
def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
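# Intended usage sketch for the Config helper above (illustrative; the class is
# only partially reconstructed here): it wraps a nested dict with attribute-style
# access, roughly
#
#   cfg = Config({"model": {"hidden_size": 768}})
#   cfg.model.hidden_size  # -> 768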
def solution() -> int:
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
if __name__ == "__main__":
print(solution()) | 678 | 1 |
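# Note: the loop above appends 10**6 number-strings, so the joined string contains
# well over 10**6 digits of Champernowne's constant and every index sampled in the
# product is in range. For Project Euler problem 40 the product evaluates to 210.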
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip()))) | 678 |
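# Example (illustrative): the first Fibonacci number with three digits is
# F(12) = 144, so
#
#   solution(3)  # -> 12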
if etag:
__magic_name__ = etag.encode('''utf-8''' )
__magic_name__ = shaaaa(snake_case_ )
filename += "." + etag_hash.hexdigest()
if url.endswith('''.h5''' ):
filename += ".h5"
return filename
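
# Sketch of the cache filename scheme above: sha256(url), plus "." + sha256(etag) when an
# etag is known; a trailing ".h5" on the url is re-appended so TF weight files keep their extension.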
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str=None , snake_case_ : Tuple=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[Any]=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[str]=False , snake_case_ : Optional[int]=False , snake_case_ : Optional[int]=False , ):
if cache_dir is None:
__magic_name__ = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
if is_remote_url(snake_case_ ):
# URL, so get it from the cache (downloading if necessary)
__magic_name__ = get_from_cache(
snake_case_ , cache_dir=snake_case_ , force_download=snake_case_ , proxies=snake_case_ , resume_download=snake_case_ , user_agent=snake_case_ , local_files_only=snake_case_ , )
elif os.path.exists(snake_case_ ):
# File, and it exists.
__magic_name__ = url_or_filename
elif urlparse(snake_case_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(snake_case_ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(snake_case_ ) )
if extract_compressed_file:
if not is_zipfile(snake_case_ ) and not tarfile.is_tarfile(snake_case_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__magic_name__ , __magic_name__ = os.path.split(snake_case_ )
__magic_name__ = output_file.replace('''.''' , '''-''' ) + '''-extracted'''
__magic_name__ = os.path.join(snake_case_ , snake_case_ )
if os.path.isdir(snake_case_ ) and os.listdir(snake_case_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__magic_name__ = output_path + '''.lock'''
with FileLock(snake_case_ ):
shutil.rmtree(snake_case_ , ignore_errors=snake_case_ )
os.makedirs(snake_case_ )
if is_zipfile(snake_case_ ):
with ZipFile(snake_case_ , '''r''' ) as zip_file:
zip_file.extractall(snake_case_ )
zip_file.close()
elif tarfile.is_tarfile(snake_case_ ):
__magic_name__ = tarfile.open(snake_case_ )
tar_file.extractall(snake_case_ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(snake_case_ ) )
return output_path_extracted
return output_path
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : int="," ):
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
with open(snake_case_ ) as f:
__magic_name__ = eval(f.read() )
else:
__magic_name__ = requests.get(snake_case_ )
try:
            __magic_name__ = req.json()
except Exception:
__magic_name__ = req.content.decode()
assert data is not None, "could not connect"
try:
__magic_name__ = eval(snake_case_ )
except Exception:
__magic_name__ = data.split('''\n''' )
req.close()
return data
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] ):
__magic_name__ = requests.get(snake_case_ )
__magic_name__ = np.array(Image.open(BytesIO(response.content ) ) )
return img
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] ):
__magic_name__ = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(snake_case_ )
with open(snake_case_ , '''rb''' ) as stream:
__magic_name__ = pkl.load(snake_case_ )
__magic_name__ = weights.pop('''model''' )
__magic_name__ = {}
for k, v in model.items():
__magic_name__ = torch.from_numpy(snake_case_ )
if "running_var" in k:
__magic_name__ = torch.tensor([0] )
__magic_name__ = k.replace('''running_var''' , '''num_batches_tracked''' )
__magic_name__ = zero
return new
def _SCREAMING_SNAKE_CASE ( ):
    print(f'{os.path.abspath(os.path.join("" , os.pardir ) )}/demo.ipynb' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple="RGB" ):
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
__magic_name__ = cva.imread(snake_case_ )
else:
__magic_name__ = get_image_from_url(snake_case_ )
assert img is not None, f'could not connect to: {im}'
__magic_name__ = cva.cvtColor(snake_case_ , cva.COLOR_BGR2RGB )
if input_format == "RGB":
__magic_name__ = img[:, :, ::-1]
return img
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Dict=1 ):
    return (images[i : i + batch] for i in range(0 , len(snake_case_ ) , snake_case_ ))
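
# De-obfuscated sketch of the chunking helper above (the names `images` and `batch` are
# inferred from the function body; this is an assumption, not the exact original):
def _chunk(images, batch=1):
    # yield successive slices of at most `batch` items each
    return (images[i : i + batch] for i in range(0, len(images), batch))

# e.g. list(_chunk([1, 2, 3, 4, 5], batch=2)) -> [[1, 2], [3, 4], [5]]
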
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
_a = MODEL_FOR_CAUSAL_LM_MAPPING
_a = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
__magic_name__ = text_generator('''This is a test''' , do_sample=A )
self.assertEqual(
A , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
__magic_name__ = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
A , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
__magic_name__ = text_generator('''This is a test''' , do_sample=A , num_return_sequences=2 , return_tensors=A )
self.assertEqual(
A , [
{'''generated_token_ids''': ANY(A )},
{'''generated_token_ids''': ANY(A )},
] , )
__magic_name__ = text_generator.model.config.eos_token_id
__magic_name__ = '''<pad>'''
__magic_name__ = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=A , num_return_sequences=2 , batch_size=2 , return_tensors=A , )
self.assertEqual(
A , [
[
{'''generated_token_ids''': ANY(A )},
{'''generated_token_ids''': ANY(A )},
],
[
{'''generated_token_ids''': ANY(A )},
{'''generated_token_ids''': ANY(A )},
],
] , )
@require_tf
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
__magic_name__ = text_generator('''This is a test''' , do_sample=A )
self.assertEqual(
A , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
__magic_name__ = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=A )
self.assertEqual(
A , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def __A ( self , A , A , A ) -> Dict:
'''simple docstring'''
__magic_name__ = TextGenerationPipeline(model=A , tokenizer=A )
return text_generator, ["This is a test", "Another test"]
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = '''Hello I believe in'''
__magic_name__ = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
__magic_name__ = text_generator(A )
self.assertEqual(
A , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
__magic_name__ = text_generator(A , stop_sequence=''' fe''' )
self.assertEqual(A , [{'''generated_text''': '''Hello I believe in fe'''}] )
def __A ( self , A , A ) -> Tuple:
'''simple docstring'''
__magic_name__ = text_generator.model
__magic_name__ = text_generator.tokenizer
__magic_name__ = text_generator('''This is a test''' )
self.assertEqual(A , [{'''generated_text''': ANY(A )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
__magic_name__ = text_generator('''This is a test''' , return_full_text=A )
self.assertEqual(A , [{'''generated_text''': ANY(A )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
__magic_name__ = pipeline(task='''text-generation''' , model=A , tokenizer=A , return_full_text=A )
__magic_name__ = text_generator('''This is a test''' )
self.assertEqual(A , [{'''generated_text''': ANY(A )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
__magic_name__ = text_generator('''This is a test''' , return_full_text=A )
self.assertEqual(A , [{'''generated_text''': ANY(A )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
__magic_name__ = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=A )
self.assertEqual(
A , [
[{'''generated_text''': ANY(A )}, {'''generated_text''': ANY(A )}],
[{'''generated_text''': ANY(A )}, {'''generated_text''': ANY(A )}],
] , )
if text_generator.tokenizer.pad_token is not None:
__magic_name__ = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=A )
self.assertEqual(
A , [
[{'''generated_text''': ANY(A )}, {'''generated_text''': ANY(A )}],
[{'''generated_text''': ANY(A )}, {'''generated_text''': ANY(A )}],
] , )
with self.assertRaises(A ):
__magic_name__ = text_generator('''test''' , return_full_text=A , return_text=A )
with self.assertRaises(A ):
__magic_name__ = text_generator('''test''' , return_full_text=A , return_tensors=A )
with self.assertRaises(A ):
__magic_name__ = text_generator('''test''' , return_text=A , return_tensors=A )
        # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
__magic_name__ = text_generator('''''' )
self.assertEqual(A , [{'''generated_text''': ANY(A )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
__magic_name__ = text_generator('''''' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
__magic_name__ = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 1_00_00
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 5_00 , max_new_tokens=20 )
__magic_name__ = text_generator('''This is a test''' * 5_00 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(A ):
text_generator(
'''This is a test''' * 5_00 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def __A ( self ) -> Any:
'''simple docstring'''
import torch
# Classic `model_kwargs`
__magic_name__ = pipeline(
model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
__magic_name__ = pipe('''This is a test''' )
self.assertEqual(
A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
        # Upgraded those two to real pipeline arguments (they just get sent to the model as they're unlikely to mean anything else.)
__magic_name__ = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
__magic_name__ = pipe('''This is a test''' )
self.assertEqual(
A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
__magic_name__ = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
__magic_name__ = pipe('''This is a test''' )
self.assertEqual(
A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def __A ( self ) -> Tuple:
'''simple docstring'''
import torch
__magic_name__ = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def __A ( self ) -> Optional[int]:
'''simple docstring'''
import torch
__magic_name__ = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
pipe('''This is a test''' , do_sample=A , top_p=0.5 )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = '''Hello world'''
__magic_name__ = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
if text_generator.model.framework == "tf":
__magic_name__ = logging.get_logger('''transformers.generation.tf_utils''' )
else:
__magic_name__ = logging.get_logger('''transformers.generation.utils''' )
        __magic_name__ = '''Both `max_new_tokens`''' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(A ) as cl:
__magic_name__ = text_generator(A , max_length=10 , max_new_tokens=1 )
self.assertIn(A , cl.out )
# The user only sets one -> no warning
with CaptureLogger(A ) as cl:
__magic_name__ = text_generator(A , max_new_tokens=1 )
self.assertNotIn(A , cl.out )
with CaptureLogger(A ) as cl:
__magic_name__ = text_generator(A , max_length=10 )
            self.assertNotIn(A , cl.out )
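
# These pipeline tests are collected by pytest in the transformers test suite; the exact
# path below is an assumption:
#   pytest tests/pipelines/test_pipelines_text_generation.py
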
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
a_ : Optional[int] = 16
a_ : int = 32
def _SCREAMING_SNAKE_CASE ( snake_case_ : Accelerator , snake_case_ : int = 16 , snake_case_ : str = "bert-base-cased" ):
__magic_name__ = AutoTokenizer.from_pretrained(snake_case_ )
__magic_name__ = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(snake_case_ : Union[str, Any] ):
# max_length=None => use the model max length (it's actually the default)
__magic_name__ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=snake_case_ , max_length=snake_case_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__magic_name__ = datasets.map(
snake_case_ , batched=snake_case_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=snake_case_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__magic_name__ = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(snake_case_ : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(snake_case_ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(snake_case_ , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
__magic_name__ = DataLoader(
tokenized_datasets['''train'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
__magic_name__ = DataLoader(
tokenized_datasets['''validation'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
return train_dataloader, eval_dataloader
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : str ):
model.eval()
__magic_name__ = 0
for step, batch in enumerate(snake_case_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__magic_name__ = model(**snake_case_ )
__magic_name__ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__magic_name__ , __magic_name__ = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(snake_case_ ) - 1:
__magic_name__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__magic_name__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=snake_case_ , references=snake_case_ , )
__magic_name__ = metric.compute()
return eval_metric["accuracy"]
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Tuple ):
# Initialize accelerator
__magic_name__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__magic_name__ = config['''lr''']
__magic_name__ = int(config['''num_epochs'''] )
__magic_name__ = int(config['''seed'''] )
__magic_name__ = int(config['''batch_size'''] )
__magic_name__ = args.model_name_or_path
set_seed(snake_case_ )
__magic_name__ , __magic_name__ = get_dataloaders(snake_case_ , snake_case_ , snake_case_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__magic_name__ = AutoModelForSequenceClassification.from_pretrained(snake_case_ , return_dict=snake_case_ )
# Instantiate optimizer
__magic_name__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__magic_name__ = optimizer_cls(params=model.parameters() , lr=snake_case_ )
if accelerator.state.deepspeed_plugin is not None:
__magic_name__ = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
__magic_name__ = 1
__magic_name__ = (len(snake_case_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__magic_name__ = get_linear_schedule_with_warmup(
optimizer=snake_case_ , num_warmup_steps=0 , num_training_steps=snake_case_ , )
else:
__magic_name__ = DummyScheduler(snake_case_ , total_num_steps=snake_case_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# We need to keep track of how many total steps we have iterated over
__magic_name__ = 0
    # We also need to keep track of the starting epoch so files are named properly
__magic_name__ = 0
__magic_name__ = evaluate.load('''glue''' , '''mrpc''' )
__magic_name__ = num_epochs
if args.partial_train_epoch is not None:
__magic_name__ = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
__magic_name__ = args.resume_from_checkpoint.split('''epoch_''' )[1]
__magic_name__ = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
__magic_name__ = int(snake_case_ ) + 1
__magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
accelerator.print('''resumed checkpoint performance:''' , snake_case_ )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
        accelerator.print('''resumed optimizer\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , '''r''' ) as f:
__magic_name__ = json.load(snake_case_ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
__magic_name__ = {}
for epoch in range(snake_case_ , snake_case_ ):
model.train()
for step, batch in enumerate(snake_case_ ):
__magic_name__ = model(**snake_case_ )
__magic_name__ = outputs.loss
__magic_name__ = loss / gradient_accumulation_steps
accelerator.backward(snake_case_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
__magic_name__ = f'epoch_{epoch}'
__magic_name__ = os.path.join(args.output_dir , snake_case_ )
accelerator.save_state(snake_case_ )
__magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
__magic_name__ = accuracy
__magic_name__ = lr_scheduler.get_lr()[0]
__magic_name__ = optimizer.param_groups[0]['''lr''']
__magic_name__ = epoch
__magic_name__ = overall_step
accelerator.print(f'epoch {epoch}:' , snake_case_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , '''w''' ) as f:
json.dump(snake_case_ , snake_case_ )
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=snake_case_ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=snake_case_ , )
parser.add_argument(
'''--output_dir''' , type=snake_case_ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=snake_case_ , default=snake_case_ , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--partial_train_epoch''' , type=snake_case_ , default=snake_case_ , help='''If passed, the training will stop after this number of epochs.''' , )
parser.add_argument(
'''--num_epochs''' , type=snake_case_ , default=2 , help='''Number of train epochs.''' , )
__magic_name__ = parser.parse_args()
__magic_name__ = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(snake_case_ , snake_case_ )
if __name__ == "__main__":
    main()
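
# A typical invocation for this script (assuming `accelerate config` has been run first):
#   accelerate launch this_script.py --model_name_or_path bert-base-cased --num_epochs 2 \
#       --output_dir ./checkpoints --resume_from_checkpoint ./checkpoints/epoch_1
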
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
a_ : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@register_to_config
def __init__( self , A , A = None , A = None ) -> Dict:
'''simple docstring'''
super().__init__()
__magic_name__ = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
__magic_name__ = torch.zeros(A , A )
else:
__magic_name__ = None
__magic_name__ = torch.nn.Parameter(A )
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = 42
_a = 42
_a = 42
_a = 42
_a = 42
_a = 42
def __init__( self , A , A , A , A , A , A , ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
self.register_modules(
vqvae=A , transformer=A , text_encoder=A , tokenizer=A , scheduler=A , learned_classifier_free_sampling_embeddings=A , )
def __A ( self , A , A , A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = len(A ) if isinstance(A , A ) else 1
# get prompt text embeddings
__magic_name__ = self.tokenizer(
A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
__magic_name__ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__magic_name__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F' {self.tokenizer.model_max_length} tokens: {removed_text}' )
__magic_name__ = text_input_ids[:, : self.tokenizer.model_max_length]
__magic_name__ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
__magic_name__ = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=A )
# duplicate text embeddings for each generation per prompt
__magic_name__ = prompt_embeds.repeat_interleave(A , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
__magic_name__ = self.learned_classifier_free_sampling_embeddings.embeddings
__magic_name__ = negative_prompt_embeds.unsqueeze(0 ).repeat(A , 1 , 1 )
else:
__magic_name__ = [''''''] * batch_size
__magic_name__ = text_input_ids.shape[-1]
__magic_name__ = self.tokenizer(
A , padding='''max_length''' , max_length=A , truncation=A , return_tensors='''pt''' , )
__magic_name__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
__magic_name__ = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=A )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__magic_name__ = negative_prompt_embeds.shape[1]
__magic_name__ = negative_prompt_embeds.repeat(1 , A , 1 )
__magic_name__ = negative_prompt_embeds.view(batch_size * num_images_per_prompt , A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__magic_name__ = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self , A , A = 1_00 , A = 5.0 , A = 1.0 , A = 1 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
if isinstance(A , A ):
__magic_name__ = 1
elif isinstance(A , A ):
__magic_name__ = len(A )
else:
raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(A )}' )
__magic_name__ = batch_size * num_images_per_prompt
__magic_name__ = guidance_scale > 1.0
__magic_name__ = self._encode_prompt(A , A , A )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(A )}.' )
# get the initial completely masked latents unless the user supplied it
__magic_name__ = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
__magic_name__ = self.transformer.num_vector_embeds - 1
__magic_name__ = torch.full(A , A ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
                    '''Unexpected latents value(s). All latents must be valid embedding indices i.e. in the range 0,'''
F' {self.transformer.num_vector_embeds - 1} (inclusive).' )
__magic_name__ = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(A , device=self.device )
__magic_name__ = self.scheduler.timesteps.to(self.device )
__magic_name__ = latents
for i, t in enumerate(self.progress_bar(A ) ):
# expand the sample if we are doing classifier free guidance
__magic_name__ = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
__magic_name__ = self.transformer(A , encoder_hidden_states=A , timestep=A ).sample
if do_classifier_free_guidance:
__magic_name__ , __magic_name__ = model_output.chunk(2 )
__magic_name__ = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(A , dim=1 , keepdim=A )
__magic_name__ = self.truncate(A , A )
# remove `log(0)`'s (`-inf`s)
__magic_name__ = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
__magic_name__ = self.scheduler.step(A , timestep=A , sample=A , generator=A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A , A , A )
__magic_name__ = self.vqvae.config.vq_embed_dim
__magic_name__ = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
__magic_name__ = self.vqvae.quantize.get_codebook_entry(A , shape=A )
__magic_name__ = self.vqvae.decode(A , force_not_quantize=A ).sample
__magic_name__ = (image / 2 + 0.5).clamp(0 , 1 )
__magic_name__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__magic_name__ = self.numpy_to_pil(A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A )
def __A ( self , A , A ) -> torch.FloatTensor:
'''simple docstring'''
__magic_name__ , __magic_name__ = torch.sort(A , 1 , descending=A )
__magic_name__ = torch.exp(A )
__magic_name__ = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
__magic_name__ = torch.full_like(keep_mask[:, 0:1, :] , A )
__magic_name__ = torch.cat((all_true, keep_mask) , dim=1 )
__magic_name__ = keep_mask[:, :-1, :]
__magic_name__ = keep_mask.gather(1 , indices.argsort(1 ) )
__magic_name__ = log_p_x_0.clone()
__magic_name__ = -torch.inf # -inf = log(0)
        return rv
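
# Minimal usage sketch, assuming pretrained components compatible with the pipeline above
# (the repo id below is illustrative):
#   pipe = DiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#   image = pipe("a painting of a cat", num_inference_steps=100).images[0]
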
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
    return " ".join(
        ''''''.join(word[::-1] ) if len(word ) > 4 else word for word in snake_case_.split() )
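
# De-obfuscated sketch of the function above (the name and parameter are inferred from the
# call site below, so the `__main__` demo actually resolves):
def reverse_long_words(sentence: str) -> str:
    # reverse every word longer than four characters, keep shorter words as-is
    return " ".join("".join(word[::-1]) if len(word) > 4 else word for word in sentence.split())
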
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(reverse_long_words('Hey wollef sroirraw'))  # -> 'Hey fellow warriors'

from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ : Optional[int] = {
'configuration_albert': ['ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AlbertConfig', 'AlbertOnnxConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[Any] = ['AlbertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[str] = ['AlbertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[Any] = [
'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'AlbertForMaskedLM',
'AlbertForMultipleChoice',
'AlbertForPreTraining',
'AlbertForQuestionAnswering',
'AlbertForSequenceClassification',
'AlbertForTokenClassification',
'AlbertModel',
'AlbertPreTrainedModel',
'load_tf_weights_in_albert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Tuple = [
'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAlbertForMaskedLM',
'TFAlbertForMultipleChoice',
'TFAlbertForPreTraining',
'TFAlbertForQuestionAnswering',
'TFAlbertForSequenceClassification',
'TFAlbertForTokenClassification',
'TFAlbertMainLayer',
'TFAlbertModel',
'TFAlbertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : int = [
'FlaxAlbertForMaskedLM',
'FlaxAlbertForMultipleChoice',
'FlaxAlbertForPreTraining',
'FlaxAlbertForQuestionAnswering',
'FlaxAlbertForSequenceClassification',
'FlaxAlbertForTokenClassification',
'FlaxAlbertModel',
'FlaxAlbertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    a_ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
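
# With the lazy module above, submodules are imported on first attribute access; assuming this
# file lives at transformers/models/albert/__init__.py, a downstream import looks like:
#   from transformers.models.albert import AlbertConfig  # triggers configuration_albert
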
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
a_ : Any = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
a_ : int = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
a_ : List[str] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
"""simple docstring"""
def __A ( self ) -> List[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
def __A ( self , A , A , A=None , A=None , A=None , A=None , A="auto" , A=-1 , A=0.9 , A=5 , A=5_00 , A="gpt2-large" , A=-1 , A=10_24 , A=25 , A=5 , A=True , A=25 , ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = compute_mauve(
p_text=A , q_text=A , p_features=A , q_features=A , p_tokens=A , q_tokens=A , num_buckets=A , pca_max_data=A , kmeans_explained_var=A , kmeans_num_redo=A , kmeans_max_iter=A , featurize_model_name=A , device_id=A , max_text_length=A , divergence_curve_discretization_size=A , mauve_scaling_factor=A , verbose=A , seed=A , )
        return out

def _SCREAMING_SNAKE_CASE ( snake_case_ : list[int] , snake_case_ : list[int] ):
__magic_name__ = len(snake_case_ )
print('''The following activities are selected:''' )
# The first activity is always selected
__magic_name__ = 0
print(snake_case_ , end=''',''' )
# Consider rest of the activities
for j in range(snake_case_ ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(snake_case_ , end=''',''' )
__magic_name__ = j
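
# De-obfuscated sketch of the greedy selection above (the name and parameters are inferred
# from the call site below, so the `__main__` demo actually resolves):
def print_max_activities(start: list[int], finish: list[int]) -> None:
    # assumes the activities are sorted by finish time
    print("The following activities are selected:")
    i = 0  # the first activity is always selected
    print(i, end=",")
    for j in range(len(finish)):
        # select the next activity whose start is not before the last selected finish
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
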
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ : Dict = [1, 3, 0, 5, 8, 5]
a_ : Union[str, Any] = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)  # prints: 0,1,3,4,

from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
a_ : Tuple = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
a_ : List[str] = typing.Union[np.floataa, int, float] # noqa: UP007
def _SCREAMING_SNAKE_CASE ( snake_case_ : Vector , snake_case_ : Vector ):
return np.sqrt(np.sum((np.asarray(snake_case_ ) - np.asarray(snake_case_ )) ** 2 ) )
def _SCREAMING_SNAKE_CASE ( snake_case_ : Vector , snake_case_ : Vector ):
    return sum((va - vb) ** 2 for va, vb in zip(snake_case_ , snake_case_ ) ) ** (1 / 2)
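
# De-obfuscated sketches of the two implementations above (names are inferred from the
# benchmark below, so its timeit strings resolve; the math is unchanged):
def euclidean_distance(vector_1, vector_2):
    # numpy version: square root of the summed squared differences
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))

def euclidean_distance_no_np(vector_1, vector_2):
    # pure-python version of the same distance
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)

# Quick sanity check: euclidean_distance([1, 2, 3], [4, 5, 6]) == sqrt(27) ≈ 5.196
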
if __name__ == "__main__":
def _SCREAMING_SNAKE_CASE ( ):
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=1_0000 , globals=globals() , ) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=1_0000 , globals=globals() , ) )
    benchmark()