code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
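# Note (not part of the original file): the @slow tests above are skipped by
# default; in a transformers checkout they are typically enabled with the
# RUN_SLOW environment variable, e.g.
#   RUN_SLOW=1 python -m pytest tests/models/openai/test_modeling_openai.py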
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
lowercase__ = logging.getLogger()
def _snake_case ( lowercase__ ):
_lowerCamelCase : List[Any] = {}
_lowerCamelCase : List[Any] = os.path.join(lowercase__ , 'all_results.json' )
if os.path.exists(lowercase__ ):
with open(lowercase__ , 'r' ) as f:
_lowerCamelCase : List[Any] = json.load(lowercase__ )
else:
raise ValueError(f'''can\'t find {path}''' )
return results
lowercase__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
def A_ ( self ):
import xla_spawn
_lowerCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
_lowerCamelCase : List[Any] = F'''
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(lowercase , 'argv' , lowercase ):
_lowerCamelCase : Dict = time()
xla_spawn.main()
_lowerCamelCase : Any = time()
_lowerCamelCase : Optional[int] = get_results(lowercase )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 500 )
def A_ ( self ):
import xla_spawn
_lowerCamelCase : Tuple = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split()
with patch.object(lowercase , 'argv' , lowercase ):
xla_spawn.main() | 96 | 0 |
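# Background note (an assumption, not stated in the original file): xla_spawn.py
# is the TPU counterpart of torch.distributed.launch -- it uses
# torch_xla.distributed.xla_multiprocessing to spawn one training process per TPU
# core, which is why the script path and --num_cores come before the training args.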
from ..utils import DummyObject, requires_backends


# This module provides one import-time placeholder class per sentencepiece-backed
# tokenizer. Importing transformers without sentencepiece installed still works;
# the failure is deferred until one of these classes is instantiated.
#
# NOTE: the identifier obfuscation in this dump erased the original class names
# (AlbertTokenizer, XLNetTokenizer, etc. in the real file), so they cannot be
# restored here; each of the ~30 stubs follows exactly this corrected pattern:


class _SentencePieceBackedTokenizer(metaclass=DummyObject):  # placeholder name
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


# ... (the original file repeats the stub above once per sentencepiece-dependent
# tokenizer class; only the class names differ.)
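# Behavior sketch (assuming sentencepiece is NOT installed): the import itself
# succeeds because only the dummy class is loaded, and the error is raised at
# instantiation time:
#
#   from transformers import XLNetTokenizer  # ok
#   XLNetTokenizer("spiece.model")           # ImportError asking you to `pip install sentencepiece`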
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
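# Note: pytest_addoption and pytest_terminal_summary are standard pytest hooks.
# The --make-reports option read above is registered inside pytest_addoption_shared,
# so running e.g.
#   python -m pytest --make-reports=my_run tests/
# makes pytest_terminal_summary_main write per-run report files under that id.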
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
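# Effect of the _LazyModule pattern above (an illustrative sketch):
#
#   from transformers.models import megatron_bert  # cheap: torch modeling code not imported yet
#   megatron_bert.MegatronBertModel                 # first attribute access triggers the
#                                                   # real modeling_megatron_bert import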
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None


def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
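# Worked example for compute_f1 above: gold "the cat sat" vs. prediction
# "cat sat down". normalize_answer drops the article, so gold tokens are
# ["cat", "sat"] and prediction tokens are ["cat", "sat", "down"]; num_same = 2,
# precision = 2/3, recall = 2/2 = 1, and F1 = 2 * (2/3 * 1) / (2/3 + 1) = 0.8.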
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    # Sweep candidate thresholds in order of increasing no-answer probability,
    # scoring each question as correct/incorrect relative to predicting no-answer.
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
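# Typical invocation (file names are illustrative, not from the original script):
#   python evaluate_squad_v2.py dev-v2.0.json predictions.json \
#       --na-prob-file na_probs.json -o eval.json
# Without --na-prob-file, all no-answer probabilities default to 0.0 and the
# best-threshold search is skipped.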
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))

    print("Googling.....")

    url = f"https://www.google.com/search?q={query}&num=100"

    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )

    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]

    webbrowser.open(link)
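# Caveat (not in the original script): the CSS class names "yuRUbf" and "kCrYT"
# are obfuscated identifiers in Google's result markup and change over time, so
# either .find() call can return None, and the fallback inside the except clause
# may itself raise an uncaught AttributeError.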
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # prune in place so os.walk skips these directories
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i):
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
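# Usage sketch (illustrative; assumes Pillow and a local image file):
#   from PIL import Image
#   tool = ImageQuestionAnsweringTool()
#   answer = tool(Image.open("photo.png"), "What is in the image?")
# Calling the tool chains encode -> forward -> decode as defined above.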
def method_1(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
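# Sanity check for the trapezoidal rule above: with f(x) = x^2 on [0, 1] and
# 10 steps (h = 0.1), the composite rule gives approximately
#   0.05 * f(0) + 0.1 * (f(0.1) + ... + f(0.9)) + 0.05 * f(1) = 0.335
# versus the exact integral 1/3. (Whether make_points yields the final interior
# point 0.9 depends on floating-point accumulation of x + h in the while loop.)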
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
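# Worked example of the resize rule above: with size = {"shortest_edge": 800,
# "longest_edge": 1333}, a 640x480 (w x h) COCO image has its short side scaled
# to 800, giving height 800 and width int(640 * 800 / 480) = 1066 -- the
# (1, 3, 800, 1066) shape asserted in the integration tests below.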
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
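# Usage sketch: RetriBertConfig() reproduces the yjernite/retribert-base-uncased
# defaults listed above, and individual fields can be overridden, e.g.
#   config = RetriBertConfig(projection_dim=256, share_encoders=False)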
"""simple docstring"""
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[str]:
snake_case_ = AutoConfig.from_pretrained(UpperCAmelCase )
snake_case_ = FlaxAutoModelForSeqaSeqLM.from_config(config=UpperCAmelCase )
snake_case_ = checkpoints.load_tax_checkpoint(UpperCAmelCase )
snake_case_ = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp']
if config.model_type == "t5":
snake_case_ = 'SelfAttention'
if config.model_type == "longt5" and config.encoder_attention_type == "local":
snake_case_ = 'LocalSelfAttention'
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
snake_case_ = 'TransientGlobalSelfAttention'
else:
raise ValueError(
'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'
' attribute with a value from [\'local\', \'transient-global].' )
# Encoder
for layer_index in range(config.num_layers ):
snake_case_ = f'layers_{str(UpperCAmelCase )}'
# Self-Attention
snake_case_ = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel']
snake_case_ = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel']
snake_case_ = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel']
snake_case_ = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
snake_case_ = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale']
# Layer Normalization
snake_case_ = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale']
if split_mlp_wi:
snake_case_ = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel']
snake_case_ = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel']
else:
snake_case_ = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel']
snake_case_ = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
snake_case_ = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
        flax_model_encoder_layer_block = flax_model.params['encoder']['block'][str(layer_index)]['layer']
snake_case_ = tax_attention_key
snake_case_ = tax_attention_out
snake_case_ = tax_attention_query
snake_case_ = tax_attention_value
snake_case_ = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
snake_case_ = tax_global_layer_norm
if split_mlp_wi:
snake_case_ = tax_mlp_wi_a
snake_case_ = tax_mlp_wi_a
else:
snake_case_ = tax_mlp_wi
snake_case_ = tax_mlp_wo
snake_case_ = tax_mlp_layer_norm
snake_case_ = flax_model_encoder_layer_block
# Only for layer 0:
snake_case_ = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T
snake_case_ = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
snake_case_ = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T
snake_case_ = tax_encoder_global_rel_embedding
# Assigning
snake_case_ = tax_model['target']['encoder']['encoder_norm']['scale']
snake_case_ = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
        layer_name = f'layers_{str(layer_index)}'
# Self-Attention
snake_case_ = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel']
snake_case_ = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel']
snake_case_ = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel']
snake_case_ = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel']
# Layer Normalization
snake_case_ = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][
'scale'
]
# Encoder-Decoder-Attention
snake_case_ = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention']
snake_case_ = tax_enc_dec_attention_module['key']['kernel']
snake_case_ = tax_enc_dec_attention_module['out']['kernel']
snake_case_ = tax_enc_dec_attention_module['query']['kernel']
snake_case_ = tax_enc_dec_attention_module['value']['kernel']
# Layer Normalization
snake_case_ = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale']
# MLP
if split_mlp_wi:
snake_case_ = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel']
snake_case_ = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel']
else:
snake_case_ = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel']
snake_case_ = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
snake_case_ = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
        flax_model_decoder_layer_block = flax_model.params['decoder']['block'][str(layer_index)]['layer']
snake_case_ = tax_attention_key
snake_case_ = tax_attention_out
snake_case_ = tax_attention_query
snake_case_ = tax_attention_value
snake_case_ = tax_pre_attention_layer_norm
snake_case_ = tax_enc_dec_attention_key
snake_case_ = tax_enc_dec_attention_out
snake_case_ = tax_enc_dec_attention_query
snake_case_ = tax_enc_dec_attention_value
snake_case_ = tax_cross_layer_norm
if split_mlp_wi:
snake_case_ = tax_mlp_wi_a
snake_case_ = tax_mlp_wi_a
else:
snake_case_ = tax_mlp_wi
snake_case_ = tax_mlp_wo
snake_case_ = txa_mlp_layer_norm
snake_case_ = flax_model_decoder_layer_block
# Decoder Normalization
snake_case_ = tax_model['target']['decoder']['decoder_norm']['scale']
snake_case_ = txa_decoder_norm
# Only for layer 0:
snake_case_ = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T
snake_case_ = tax_decoder_rel_embedding
# Token Embeddings
snake_case_ = tax_model['target']['token_embedder']['embedding']
snake_case_ = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
snake_case_ = tax_model['target']['decoder']['logits_dense']['kernel']
    flax_model.save_pretrained(flax_dump_folder_path)
    print('T5X Model was successfully converted!')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
    args = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 69 | """simple docstring"""
from math import factorial
def solution(n: int = 20) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
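# Illustrative sanity check (not in the original file): solution(1) == 2 and
# solution(2) == 6, the central binomial coefficients C(2, 1) and C(4, 2).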
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number.''')
| 69 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    def __init__(self, components: Collection[float] | None = None):
        if components is None:
            components = []
        self.__components = list(components)
    def __len__(self):
        return len(self.__components)
    def __str__(self):
        return "(" + ",".join(map(str, self.__components)) + ")"
    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception('must have the same size')
    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception('must have the same size')
    @overload
    def __mul__(self, other: float) -> Vector:
        ...
    @overload
    def __mul__(self, other: Vector) -> float:
        ...
    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception('invalid operand!')
    def copy(self) -> Vector:
        return Vector(self.__components)
    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception('index out of range')
    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value
    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception('Vector is empty')
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))
    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension: int) -> Vector:
    # returns a zero vector of the given dimension
    assert isinstance(dimension, int)
    return Vector([0] * dimension)
def unit_basis_vector(dimension: int, pos: int) -> Vector:
    # returns a unit basis vector with a one at index pos (zero elsewhere)
    assert isinstance(dimension, int) and isinstance(pos, int)
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)
def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    # computes the axpy operation: scalar * x + y
    assert isinstance(x, Vector) and isinstance(y, Vector) and isinstance(scalar, (int, float))
    return x * scalar + y
def random_vector(n: int, a: int, b: int) -> Vector:
    # returns a vector of size n with random integer components between a and b
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
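# Quick usage sketch (illustrative only), relying on the helpers above:
# axpy(2.0, Vector([1, 2, 3]), zero_vector(3))   # -> (2,4,6)
# unit_basis_vector(3, 0)                        # -> (1,0,0)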
class Matrix:
    def __init__(self, matrix: list[list[float]], w: int, h: int):
        self.__matrix = matrix
        self.__width = w
        self.__height = h
    def __str__(self):
        ans = ''
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans
    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception('matrix must have the same dimension!')
    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception('matrices must have the same dimension!')
    @overload
    def __mul__(self, other: float) -> Matrix:
        ...
    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...
    def __mul__(self, other: float | Vector) -> Vector | Matrix | None:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    'vector must have the same size as the '
                    'number of columns of the matrix!')
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None
    def height(self) -> int:
        return self.__height
    def width(self) -> int:
        return self.__width
    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception('change_component: indices out of bounds')
    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception('change_component: indices out of bounds')
    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception('Matrix is not square')
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()
    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception('Matrix is not square')
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception('Indices out of bounds')
    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception('Matrix is not square')
        if self.__height < 1:
            raise Exception('Matrix has no element')
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n: int) -> Matrix:
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)
def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
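# Illustrative check (not part of the original module): a hand-built 2x2 identity
# matrix has determinant 1.
# Matrix([[1, 0], [0, 1]], 2, 2).determinant()   # -> 1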
| 107 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = 'backbone.' if is_semantic else ''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""{prefix}blocks.{i}.norm1.weight""", f"""beit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm1.bias""", f"""beit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.weight""", f"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.bias""", f"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.weight""", f"""beit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.bias""", f"""beit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.weight""", f"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.bias""", f"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.weight""", f"""beit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.bias""", f"""beit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
(f"""{prefix}cls_token""", 'beit.embeddings.cls_token'),
(f"""{prefix}patch_embed.proj.weight""", 'beit.embeddings.patch_embeddings.projection.weight'),
(f"""{prefix}patch_embed.proj.bias""", 'beit.embeddings.patch_embeddings.projection.bias'),
(f"""{prefix}pos_embed""", 'beit.embeddings.position_embeddings'),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('mask_token', 'beit.embeddings.mask_token'),
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('fc_norm.weight', 'beit.pooler.layernorm.weight'),
('fc_norm.bias', 'beit.pooler.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers ):
        prefix = 'backbone.' if is_semantic else ''
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"""{prefix}blocks.{i}.attn.qkv.weight""" )
        q_bias = state_dict.pop(f"""{prefix}blocks.{i}.attn.q_bias""" )
        v_bias = state_dict.pop(f"""{prefix}blocks.{i}.attn.v_bias""" )
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.query.bias"""] = q_bias
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.value.bias"""] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"""{prefix}blocks.{i}.gamma_1""" )
        gamma_2 = state_dict.pop(f"""{prefix}blocks.{i}.gamma_2""" )
        state_dict[f"""beit.encoder.layer.{i}.lambda_1"""] = gamma_1
        state_dict[f"""beit.encoder.layer.{i}.lambda_2"""] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    has_lm_head = False if 'rvlcdip' in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True , use_mask_token=has_lm_head )
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = 'huggingface/label-files'
        filename = 'rvlcdip-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
# load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' )['model']
    rename_keys = create_rename_keys(config , has_lm_head=has_lm_head )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , has_lm_head=has_lm_head )
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config ) if has_lm_head else BeitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
# Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=False )
    image = prepare_img()
    encoding = image_processor(images=image , return_tensors='pt' )
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values )
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 16] if 'rvlcdip' in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape ), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
        if has_lm_head:
            model_name = 'dit-base' if 'base' in checkpoint_url else 'dit-large'
        else:
            model_name = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip'
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=True , )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=True , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 107 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """simple docstring"""
    model_name_or_path: str = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    config_name: Optional[str] = field(
        default=None ,metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    task_type: Optional[str] = field(
        default="""NER""" ,metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} )
    tokenizer_name: Optional[str] = field(
        default=None ,metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    use_fast: bool = field(default=False ,metadata={"""help""": """Set this flag to use fast tokenization."""} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None ,metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} ,)
@dataclass
class DataTrainingArguments:
    """simple docstring"""
    data_dir: str = field(
        metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
    labels: Optional[str] = field(
        default=None ,metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} ,)
    max_seq_length: int = field(
        default=128 ,metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } ,)
    overwrite_cache: bool = field(
        default=False ,metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def main():
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
    module = import_module('''tasks''' )
    try:
        token_classification_task_clazz = getattr(module , model_args.task_type )
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , training_args )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels )
    label_map: Dict[int, str] = dict(enumerate(labels ) )
    num_labels = len(labels )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label=label_map , label2id={label: i for i, label in enumerate(labels )} , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions , label_ids ) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions , axis=2 )
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size )]
        preds_list = [[] for _ in range(batch_size )]
        for i in range(batch_size ):
            for j in range(seq_len ):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        return preds_list, out_label_list
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions , p.label_ids )
        return {
            "accuracy_score": accuracy_score(out_label_list , preds_list ),
            "precision": precision_score(out_label_list , preds_list ),
            "recall": recall_score(out_label_list , preds_list ),
            "f1": f1_score(out_label_list , preds_list ),
        }
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        if trainer.is_world_process_zero():
            with open(output_eval_file , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in result.items():
                    logger.info('''  %s = %s''' , key , value )
                    writer.write('''%s = %s\n''' % (key, value) )
            results.update(result )
# Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
        predictions, label_ids, metrics = trainer.predict(test_dataset )
        preds_list, _ = align_predictions(predictions , label_ids )
        output_test_results_file = os.path.join(training_args.output_dir , '''test_results.txt''' )
        if trainer.is_world_process_zero():
            with open(output_test_results_file , '''w''' ) as writer:
                for key, value in metrics.items():
                    logger.info('''  %s = %s''' , key , value )
                    writer.write('''%s = %s\n''' % (key, value) )
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir , '''test_predictions.txt''' )
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file , '''w''' ) as writer:
                with open(os.path.join(data_args.data_dir , '''test.txt''' ) , '''r''' ) as f:
                    token_classification_task.write_predictions_to_file(writer , f , preds_list )
return results
def _mp_fn(index ):
    '''simple docstring'''
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 207 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
    'linear': get_linear_schedule_with_warmup,
    'cosine': get_cosine_schedule_with_warmup,
    'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
    'polynomial': get_polynomial_decay_schedule_with_warmup,
    'constant': get_constant_schedule,
    'constant_w_warmup': get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer( Trainer ):
    """simple docstring"""
    def __init__( self, config=None, data_args=None, *args, **kwargs ):
        '''simple docstring'''
        super().__init__(*args, **kwargs )
        if config is None:
            assert isinstance(self.model, PreTrainedModel ), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                F""" {self.model.__class__}"""
            )
            self.config = self.model.config
        else:
            self.config = config
        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig ) else self.config.vocab_size
        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )
        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                F"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
                ''' padding.''' )
        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss
            self.loss_fn = label_smoothed_nll_loss
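            # Note: label_smoothed_nll_loss expects log-probabilities rather than raw
            # logits, so _compute_loss applies log_softmax before invoking it.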
    def create_optimizer_and_scheduler( self, num_training_steps: int ):
        '''simple docstring'''
        if self.optimizer is None:
            no_decay = ['''bias''', '''LayerNorm.weight''']
            optimizer_grouped_parameters = [
                {
                    '''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    '''weight_decay''': self.args.weight_decay,
                },
                {
                    '''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    '''weight_decay''': 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {'''scale_parameter''': False, '''relative_step''': False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    '''betas''': (self.args.adam_beta1, self.args.adam_beta2),
                    '''eps''': self.args.adam_epsilon,
                }
            optimizer_kwargs['''lr'''] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters, optim=optimizer_cls, **optimizer_kwargs, )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs )
        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps )
        else:  # ignoring --lr_scheduler
            logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' )
    def _get_lr_scheduler( self, num_training_steps ):
        '''simple docstring'''
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps )
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps )
        return scheduler
    def _get_train_sampler( self ):
'''simple docstring'''
if isinstance(self.train_dataset, torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size, distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED), )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def _compute_loss( self, model, inputs, labels ):
        '''simple docstring'''
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False )[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1] ), labels.view(-1 ) )
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False )[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False )[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1 )
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id )
        return loss, logits
    def compute_loss( self, model, inputs ):
        '''simple docstring'''
        labels = inputs.pop('''labels''' )
        loss, _ = self._compute_loss(model, inputs, labels )
        return loss
    def prediction_step( self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[List[str]] = None, ):
        '''simple docstring'''
        inputs = self._prepare_inputs(inputs )
        gen_kwargs = {
            '''max_length''': self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            '''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs['''input_ids'''], attention_mask=inputs['''attention_mask'''], **gen_kwargs, )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs['''max_length'''] )
        labels = inputs.pop('''labels''' )
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels )
        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        logits = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs['''max_length'''] )
        return (loss, logits, labels)
    def _pad_tensors_to_max_len( self, tensor, max_length ):
        '''simple docstring'''
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                '''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'''
                F""" padded to `max_length`={max_length}""" )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
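    # Illustrative behaviour sketch (not part of the original file): padding a
    # (batch, seq_len) tensor to max_length keeps the original values in the first
    # seq_len positions and fills the remaining positions with pad_token_id.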
| 207 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__( self , parent , batch_size=1_3 , image_size=3_2 , num_channels=3 , num_stages=4 , hidden_sizes=[1_0, 2_0, 3_0, 4_0] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=3_7 , hidden_act="gelu" , num_labels=1_0 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        '''simple docstring'''
        return ConvNextConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=False , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
    def create_and_check_model( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = ConvNextModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = ConvNextForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_backbone( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = ConvNextBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = ConvNextModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvNextConfig , has_text_modality=False , hidden_size=3_7 )
    def test_config( self ):
        '''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
        '''simple docstring'''
        return
@unittest.skip(reason='ConvNext does not use inputs_embeds' )
    def test_inputs_embeds( self ):
'''simple docstring'''
pass
@unittest.skip(reason='ConvNext does not support input and output embeddings' )
    def test_model_common_attributes( self ):
'''simple docstring'''
pass
@unittest.skip(reason='ConvNext does not use feedforward chunking' )
    def test_feed_forward_chunking( self ):
'''simple docstring'''
pass
    def test_forward_signature( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_backbone( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
    def test_hidden_states_output( self ):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        '''simple docstring'''
        return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head( self ):
        '''simple docstring'''
        model = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.02_60, -0.47_39, 0.19_11] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@require_torch
class ConvNextBackboneTest( unittest.TestCase , BackboneTesterMixin ):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = ConvNextModelTester(self )
| 186 |
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester( ConfigTester ):
    def create_and_test_config_common_properties( self ):
        '''simple docstring'''
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , 'hidden_sizes' ) )
        self.parent.assertTrue(hasattr(config , 'num_attention_heads' ) )
        self.parent.assertTrue(hasattr(config , 'num_encoder_blocks' ) )
class SegformerModelTester:
    def __init__( self , parent , batch_size=1_3 , image_size=6_4 , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[1_6, 3_2, 6_4, 1_2_8] , downsampling_rates=[1, 4, 8, 1_6] , num_attention_heads=[1, 2, 4, 8] , is_training=True , use_labels=True , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , num_labels=3 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        '''simple docstring'''
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = SegformerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
    def create_and_check_for_image_segmentation( self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        self.parent.assertGreater(result.loss , 0.0 )
    def create_and_check_for_binary_image_segmentation( self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        labels = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(torch_device )
        result = model(pixel_values , labels=labels )
        self.parent.assertGreater(result.loss , 0.0 )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            '''feature-extraction''': SegformerModel,
            '''image-classification''': SegformerForImageClassification,
            '''image-segmentation''': SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = SegformerModelTester(self )
        self.config_tester = SegformerConfigTester(self , config_class=SegformerConfig )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_binary_image_segmentation( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs )
    def test_for_image_segmentation( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs )
@unittest.skip('SegFormer does not use inputs_embeds' )
    def test_inputs_embeds( self ):
'''simple docstring'''
pass
@unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods' )
    def test_model_common_attributes( self ):
'''simple docstring'''
pass
    def test_forward_signature( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_attention_outputs( self ):
'''simple docstring'''
a, a : Any = self.model_tester.prepare_config_and_inputs_for_common()
a : Any = True
for model_class in self.all_model_classes:
a : Optional[Any] = True
a : Tuple = False
a : int = True
a : Any = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
a : Dict = model(**self._prepare_for_class(A , A ) )
a : Union[str, Any] = outputs.attentions
a : Tuple = sum(self.model_tester.depths )
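        # every transformer layer contributes one attention map, so the expected
        # total is the sum of the per-block depths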
self.assertEqual(len(A ) , A )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
a : Tuple = True
a : Optional[Any] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
a : str = model(**self._prepare_for_class(A , A ) )
a : Optional[int] = outputs.attentions
self.assertEqual(len(A ) , A )
# verify the first attentions (first block, first layer)
a : Union[str, Any] = (self.model_tester.image_size // 4) ** 2
a : List[str] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
a : Tuple = (self.model_tester.image_size // 3_2) ** 2
a : Tuple = (self.model_tester.image_size // (3_2 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
a : str = len(A )
# Check attention is always last and order is fine
a : str = True
a : Tuple = True
a : List[str] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
a : Dict = model(**self._prepare_for_class(A , A ) )
self.assertEqual(out_len + 1 , len(A ) )
a : str = outputs.attentions
self.assertEqual(len(A ) , A )
# verify the first attentions (first block, first layer)
a : Union[str, Any] = (self.model_tester.image_size // 4) ** 2
a : Optional[int] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # MODEL_MAPPING is assumed to be imported at the top of this module
            # (as in the upstream test file); base models have no loss to train.
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):  # reconstructed name; the original test name is uncertain
        pass
@slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))
@slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))
@slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
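    # post_process_semantic_segmentation returns one (height, width) label map
    # per image; without `target_sizes` it stays at the model's native logits
    # resolution, which is 128x128 for this checkpoint.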
| 186 | 1 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        LayoutLMv3ForQuestionAnswering,
        LayoutLMv3ForSequenceClassification,
        LayoutLMv3ForTokenClassification,
        LayoutLMv3Model,
    )
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3Model(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
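    # Note: inputs_dict carries both modalities. The model's effective
    # sequence length is text_seq_length + image_seq_length, where
    # image_seq_length = (image_size // patch_size) ** 2 + 1 (the CLS token).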
@require_torch
class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMv3Model,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3ForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMv3ForQuestionAnswering, "feature-extraction": LayoutLMv3Model}
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def setUp(self):
        self.model_tester = LayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device,
                )
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None
@slow
    def test_inference_no_head(self):
        model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device), bbox=bbox.to(torch_device), pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits: 199 = 2 text tokens + 197 visual tokens (14x14 patches + CLS)
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4)) | 242 |
"""simple docstring"""
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
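# Example invocation (script name and model identifiers are illustrative):
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-checkpoint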
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""",
choices=["""rag_sequence""", """rag_token"""],
required=True,
type=str,
help="""RAG model type: rag_sequence, rag_token""",
)
parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""")
parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""")
parser.add_argument(
"""--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier"""
)
parser.add_argument(
"""--generator_tokenizer_name_or_path""",
type=str,
help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""",
)
parser.add_argument(
"""--question_encoder_tokenizer_name_or_path""",
type=str,
help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""",
)
parser.add_argument(
"""--config_name_or_path""",
type=str,
help=(
"""Identifier of the model config to use, if not provided, resolves to a base config for a given"""
""" ``model_type``"""
),
)
    args = parser.parse_args()

    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
) | 126 | 0 |
"""simple docstring"""
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


# for checking whether the graph has an Euler path or circuit:
# returns 1 for a circuit (no odd-degree vertices), 2 for a path
# (exactly two odd-degree vertices), 3 for neither
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
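# Sanity check of the dispatch above (a sketch): g1 has exactly two odd-degree
# vertices (1 and 5), so check_euler(g1, 10) prints "graph has a Euler path"
# followed by a traversal that starts at node 5.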
if __name__ == "__main__":
main() | 58 |
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
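# Setting the last column to 1 guarantees every row of the mask attends to at
# least one token, so generation tests never see a fully-masked batch entry.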
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()
    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]

        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
    def test_greedy_generate_pt_fx(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())

    def test_greedy_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
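    # The eager-vs-jit comparison repeated throughout these tests asserts that
    # tracing/compiling `generate` with jax.jit does not change its outputs:
    # given the same config (and PRNG handling), both paths must agree exactly.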
    def test_sample_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)

    def test_sample_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs) | 58 | 1 |
'''simple docstring'''
import re
def dna(dna: str) -> str:
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))
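# Example (complement mapping A<->T, C<->G): dna("ATCG") == "TAGC" and
# dna("GTA") == "CAT"; any character outside [ATCG] raises ValueError.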
if __name__ == "__main__":
import doctest
doctest.testmod()
| 75 |
def check_cycle(graph: dict) -> bool:
    """Returns True if the directed graph contains a cycle."""
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
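# Minimal usage sketch (graph shape assumed: adjacency dict of int -> list[int]):
#   check_cycle({0: [1], 1: [2], 2: [0]})  -> True   (0 -> 1 -> 2 -> 0 is a back edge)
#   check_cycle({0: [1], 1: [2], 2: []})   -> False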
if __name__ == "__main__":
from doctest import testmod
testmod()
| 117 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
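    # With the _LazyModule shim installed in sys.modules, importing this
    # package stays cheap: the torch-backed symbols declared above are only
    # resolved on first attribute access.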
| 202 |
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
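# bleu_data maps a language pair such as "en-ru" to {"src": [...], "tgt": [...]}
# parallel sentence lists, as consumed by test_bleu_scores below.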
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)
    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
["en-ru", 26.0],
["ru-en", 22.0],
["en-de", 22.0],
["de-en", 29.0],
] )
@slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f'facebook/wmt19-{pair}'
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids, num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 202 | 1 |
"""simple docstring"""
def topological_sort(graph):
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)
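# This is Kahn's algorithm: repeatedly emit zero in-degree vertices; if fewer
# than len(graph) vertices are ever emitted, the leftover edges form a cycle.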
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 33 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1,
            )
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2')

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-0_5, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=10_00,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D'), up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D'), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type='projection', projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule='scaled_linear', beta_start=0.00085, beta_end=0.012, prediction_type='v_prediction', set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            'feature_extractor': feature_extractor,
            'image_encoder': image_encoder.eval(),
            # image noising components
            'image_normalizer': image_normalizer.eval(),
            'image_noising_scheduler': image_noising_scheduler,
            # regular denoising components
            'tokenizer': tokenizer,
            'text_encoder': text_encoder.eval(),
            'unet': unet.eval(),
            'scheduler': scheduler,
            'vae': vae.eval(),
        }

        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def test_image_embeds_none(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({'image_embeds': None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ['cpu', 'mps']
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ['cpu', 'mps']
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy' )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-l-img2img', torch_dtype=torch.float16 )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe(input_image, 'anime turle', generator=generator, output_type='np')

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy' )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-h-img2img', torch_dtype=torch.float16 )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe(input_image, 'anime turle', generator=generator, output_type='np')

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-h-img2img', torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            input_image, 'anime turtle', num_inference_steps=2, output_type='np', )

        mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9 | 297 | 0 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    BitsAndBytesConfig,
    pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
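# Both attributes above point at an MLP linear layer: `c_fc` for GPT-2-style
# blocks, `dense_4h_to_h` for BLOOM-style blocks, so the quantization checks
# below can inspect a representative nn.Linear regardless of architecture.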
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only."""

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
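# Because the second adapter projection is zero-initialized, the wrapped module
# initially reproduces the frozen base layer's output exactly; training only
# gradually mixes in the low-rank update.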
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    # We need to test on relatively large models (aka >1b parameters), otherwise
    # the quantization may not behave as expected; here we use bloom-1b7.
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
super().setUp()
# Models and tokenizer
UpperCAmelCase__ = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="""auto""" )
UpperCAmelCase__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map="""auto""" )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = self.model_abit.config
self.assertTrue(hasattr(_UpperCAmelCase , """quantization_config""" ) )
UpperCAmelCase__ = config.to_dict()
UpperCAmelCase__ = config.to_diff_dict()
UpperCAmelCase__ = config.to_json_string()
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
UpperCAmelCase__ = self.model_fpaa.get_memory_footprint()
UpperCAmelCase__ = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
UpperCAmelCase__ = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fpaa.get_memory_footprint()
        self.model_abit.get_memory_footprint()

        for name, module in self.model_abit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
    def test_generate_quality_config(self):
        quantization_config = BitsAndBytesConfig()
        quantization_config.load_in_4bit = True

        model_abit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=quantization_config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_abit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        quantization_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=quantization_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )
    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_abit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_abit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_abit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries casting the dtype with `float`
            self.model_abit.float()

        with self.assertRaises(ValueError):
            # Tries casting the dtype with `half`
            self.model_abit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        self.model_fpaa = self.model_fpaa.to(torch.float32)
        _ = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fpaa.to("cpu")

        # Check this does not throw an error
        _ = self.model_fpaa.half()

        # Check this does not throw an error
        _ = self.model_fpaa.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()
    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_abit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
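
# --- Usage sketch (not part of the test suite) --------------------------------
# A minimal sketch of the 4-bit loading pattern the tests above exercise,
# assuming a CUDA device with `bitsandbytes` installed; the checkpoint is the
# same one the tests use.
if __name__ == "__main__":
    nf4_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
    demo_model = AutoModelForCausalLM.from_pretrained(
        "bigscience/bloom-1b7", quantization_config=nf4_config, device_map="auto"
    )
    demo_tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-1b7")
    demo_ids = demo_tokenizer("Hello my name is", return_tensors="pt").input_ids.to(0)
    print(demo_tokenizer.decode(demo_model.generate(demo_ids, max_new_tokens=10)[0], skip_special_tokens=True))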
| 356 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xmod': [
'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XmodConfig',
'XmodOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST',
'XmodForCausalLM',
'XmodForMaskedLM',
'XmodForMultipleChoice',
'XmodForQuestionAnswering',
'XmodForSequenceClassification',
'XmodForTokenClassification',
'XmodModel',
'XmodPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
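
# What the _LazyModule registration above buys: heavy torch-backed submodules
# are only imported when one of their symbols is first accessed. Illustrative
# only (requires `transformers` with torch installed):
#
#     import transformers.models.xmod as xmod
#
#     config = xmod.XmodConfig()      # first access imports configuration_xmod
#     model = xmod.XmodModel(config)  # first access imports modeling_xmod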
| 61 | 0 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)


_CITATION = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
_DESCRIPTION = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
_KWARGS_DESCRIPTION = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"Number of removed nested coreferring mentions in the key "
f'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''')
logger.info(
"Number of resulting singleton clusters in the key "
f'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''')
if not keep_singletons:
logger.info(
f'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
"files, respectively")
return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f'''{name}/recall''': recall, f'''{name}/precision''': precision, f'''{name}/f1''': f1})

        logger.info(
            name.ljust(10),
            f'''Recall: {recall * 100:.2f}''',
            f''' Precision: {precision * 100:.2f}''',
            f''' F1: {f1 * 100:.2f}''',
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f'''CoNLL score: {conll:.2f}''')
        output_scores.update({"conll_score": conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Sequence(datasets.Value("string" ) ),
} ) , codebase_urls=["https://github.com/ns-moosavi/coval"] , reference_urls=[
"https://github.com/ns-moosavi/coval",
"https://www.aclweb.org/anthology/P16-1060",
"http://www.conll.cemantix.org/2012/data.html",
] , )
    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
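
# A minimal sketch of driving the metric above, mirroring the docstring
# example (requires the coval dependency; `words` is a list of CoNLL-formatted
# lines):
#
#     coval = datasets.load_metric("coval")
#     results = coval.compute(predictions=[words], references=[words])
#     print(results["conll_score"])  # 100.0 when predictions match references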
| 87 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"


class UperNetConvModule(nn.Module):
    """
    A convolutional block that bundles conv/norm/activation layers.
    """

    def __init__(self, in_channels, out_channels, kernel_size, padding=0, bias=False, dilation=1) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    """
    Pyramid Pooling Module (PPM) used in PSPNet.
    """

    def __init__(self, pool_scales, in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    """
    Unified Perceptual Parsing head (UPerHead), based on FPN and PSP modules.
    """

    def __init__(self, config, in_channels) -> None:
        super().__init__()

        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)

        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]

        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output
class UperNetFCNHead(nn.Module):
    """
    Fully Convolutional Network head, used here as an auxiliary head on top of the backbone.
    """

    def __init__(self, config, in_index: int = 2, kernel_size: int = 3, dilation: int = 1) -> None:
        super().__init__()

        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        """Initialize the weights"""
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = R'''
Parameters:
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
UPERNET_INPUTS_DOCSTRING = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
`attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
            Ground truth semantic segmentation maps for computing the loss.

        Returns:
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
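
# A minimal usage sketch for the model above (illustrative; the checkpoint is
# the one listed in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST):
#
#     from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
#
#     processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#     model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#     inputs = processor(images=image, return_tensors="pt")
#     logits = model(**inputs).logits  # shape (batch_size, num_labels, height, width)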
| 113 | 0 |
from bisect import bisect
from itertools import accumulate


def frac_knapsack(vl, wt, w, n):
    """Greedy fractional knapsack: maximize value for capacity `w` over `n` items."""
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
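

# Worked example: with values (60, 100, 120), weights (10, 20, 30) and capacity
# 50, the two best-ratio items are taken whole (160) plus 20/30 of the last
# item (80):
# >>> frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
# 240.0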
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48 |
import functools


def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Levenshtein edit distance via memoized top-down recursion."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index overflows - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index overflows - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
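

# Example: "kitten" -> "sitting" needs 3 edits (k->s, e->i, append g):
# >>> min_distance_up_bottom("kitten", "sitting")
# 3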
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48 | 1 |
"""simple docstring"""
def _lowercase ( __lowerCAmelCase = 1000 ) -> int:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1, 1
SCREAMING_SNAKE_CASE__ : List[str] = []
for i in range(1 , n + 1 ):
SCREAMING_SNAKE_CASE__ : int = prev_numerator + 2 * prev_denominator
SCREAMING_SNAKE_CASE__ : Any = prev_numerator + prev_denominator
if len(str(__lowerCAmelCase ) ) > len(str(__lowerCAmelCase ) ):
result.append(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = numerator
SCREAMING_SNAKE_CASE__ : Union[str, Any] = denominator
return len(__lowerCAmelCase )
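

# Worked example: the eighth expansion is 1393/985, the first whose numerator
# has more digits than its denominator, so solution(8) == 1.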
if __name__ == "__main__":
print(f'{solution() = }')
| 132 |
"""simple docstring"""
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = "\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n"
_DESCRIPTION = "\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.\n"
_KWARGS_DESCRIPTION = r"\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting \"1/2\" to \"\\frac{1}{2}\")\n\nExamples:\n >>> metric = datasets.load_metric(\"competition_math\")\n >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])\n >>> print(results)\n {'accuracy': 1.0}\n"
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset."""

    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/hendrycks/math""" , codebase_urls=["""https://github.com/hendrycks/math"""] , )
    def _compute(self, references, predictions):
        """Returns the accuracy after canonicalizing inputs."""
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
return {
"accuracy": accuracy,
}
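
# Note: `math_equivalence.is_equiv` canonicalizes LaTeX before comparing, so
# notational variants such as "1/2" and "\\frac{1}{2}" count as the same
# answer; that canonicalization is what makes exact-match accuracy meaningful
# on MATH.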
| 132 | 1 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)


class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples
    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        # in the CONLL2003 dataset, the chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []

        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
            ]
 | 355 | 0 |
import base64


def base85_encode(string: str) -> bytes:
    # the Ascii85 codec operates on bytes, hence the UTF-8 encoding
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    return base64.a85decode(a85encoded).decode("utf-8")
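

# Example round trip (the exact Ascii85 bytes depend on the input):
# >>> base85_decode(base85_encode("Hello World!")) == "Hello World!"
# True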
if __name__ == "__main__":
import doctest
    doctest.testmod()
 | 130 | 0 |
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    """
    ResNet embeddings (stem): a single aggressive convolution followed by max pooling.
    """

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
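
# Note: the stem above downsamples by 4x overall (a stride-2 7x7 convolution
# followed by a stride-2 3x3 max pool), so a 224x224 input leaves the embedder
# as a 56x56 feature map.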
class ResNetShortCut(nn.Module):
    """
    ResNet shortcut, used to project the residual features to the correct size. If needed, it also downsamples the
    input using `stride=2`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    """
    A classic ResNet residual layer composed of two `3x3` convolutions.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
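
# Design note: the residual addition in `forward` is what makes this a ResNet
# block; the stacked convolutions only learn a correction to the identity
# mapping, which keeps very deep networks trainable.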
class ResNetBottleNeckLayer(nn.Module):
    """
    A classic ResNet bottleneck layer: a `1x1` convolution reduces the channels by `reduction`, a `3x3` convolution
    processes them, and a final `1x1` convolution remaps them to the output size.
    """

    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    """
    A ResNet stage composed of stacked layers.
    """

    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()

        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer

        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = R'''
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''

RESNET_INPUTS_DOCSTRING = R'''
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''


@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
"""simple docstring"""
def __init__( self: Tuple , UpperCamelCase: Optional[Any] ):
"""simple docstring"""
super().__init__(_lowerCAmelCase )
A__ = config
A__ = ResNetEmbeddings(_lowerCAmelCase )
A__ = ResNetEncoder(_lowerCAmelCase )
A__ = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_lowerCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_lowerCAmelCase , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase ( self: str , UpperCamelCase: Tensor , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[bool] = None ):
"""simple docstring"""
A__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A__ = return_dict if return_dict is not None else self.config.use_return_dict
A__ = self.embedder(_lowerCAmelCase )
A__ = self.encoder(
_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , return_dict=_lowerCAmelCase )
A__ = encoder_outputs[0]
A__ = self.pooler(_lowerCAmelCase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_lowerCAmelCase , pooler_output=_lowerCAmelCase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values)
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
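# A minimal usage sketch for the classes above (not part of the original file). It builds a
# randomly initialized model from a default config and runs a forward pass on dummy pixel
# values; the default config values (3 input channels, 2 labels) are assumptions here.
#
#     config = ResNetConfig()
#     model = ResNetForImageClassification(config)
#     model.eval()
#     pixel_values = torch.randn(1, config.num_channels, 224, 224)
#     with torch.no_grad():
#         outputs = model(pixel_values)
#     print(outputs.logits.shape)  # torch.Size([1, config.num_labels])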
| 335 |
from math import pi
def arc_length(angle: int, radius: int) -> float:
    """Length of a circular arc spanning `angle` degrees on a circle of radius `radius`."""
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
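# For angle=90 and radius=10 this evaluates to 2 * pi * 10 * (90 / 360) = 5 * pi, i.e. about 15.708.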
| 159 | 0 |
from __future__ import annotations
def all_unique(values: list[int]) -> bool:
    """
    Return True if no value occurs more than once in `values`.

    >>> all_unique([1, 2, 3])
    True
    >>> all_unique([1, 2, 2])
    False
    """
    return len(set(values)) == len(values)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 356 |
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname: str) -> str:
    # File names look like "<label>_<number>.jpg"; recover the label part.
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)
    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None
    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]
    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}
    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]
    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )
    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)
    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True
    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)
    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)
    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()
        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
        "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
main()
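# Example invocation (script name and data path are illustrative, not from the original file):
#     accelerate launch cv_example.py --data_dir ./images --with_tracking --checkpointing_steps epoch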
| 112 | 0 |
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    """Fit via the normal equation, beta = (X^T X)^-1 X^T y, then predict the test point."""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2])
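# Quick sanity check for the closed-form fit above (toy numbers, not from the dataset):
# train_dt=[1, 2, 3], train_mtch=[1, 2, 1], train_usr=[2, 4, 6] gives beta = [0, 2, 0],
# so linear_regression_prediction(..., test_dt=[4], test_mtch=[5]) returns about 8.0.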
def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
def interquartile_range_checker(train_user: list) -> float:
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
UpperCAmelCase__ : str = [[1_8_2_3_1, 0.0, 1], [2_2_6_2_1, 1.0, 2], [1_5_6_7_5, 0.0, 3], [2_3_5_8_3, 1.0, 4]]
UpperCAmelCase__ : int = pd.DataFrame(
data_input, columns=['total_user', 'total_even', 'days']
)
UpperCAmelCase__ : Tuple = Normalizer().fit_transform(data_input_df.values)
# split data
UpperCAmelCase__ : Optional[Any] = normalize_df[:, 2].tolist()
UpperCAmelCase__ : Union[str, Any] = normalize_df[:, 0].tolist()
UpperCAmelCase__ : int = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
UpperCAmelCase__ : Any = normalize_df[:, [1, 2]].tolist()
UpperCAmelCase__ : int = x[: len(x) - 1]
UpperCAmelCase__ : Union[str, Any] = x[len(x) - 1 :]
# for linear regression & sarimax
UpperCAmelCase__ : Optional[int] = total_date[: len(total_date) - 1]
UpperCAmelCase__ : Any = total_user[: len(total_user) - 1]
UpperCAmelCase__ : Optional[Any] = total_match[: len(total_match) - 1]
UpperCAmelCase__ : Dict = total_date[len(total_date) - 1 :]
UpperCAmelCase__ : str = total_user[len(total_user) - 1 :]
UpperCAmelCase__ : Optional[int] = total_match[len(total_match) - 1 :]
# voting system with forecasting
UpperCAmelCase__ : Optional[Any] = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
UpperCAmelCase__ : int = '' if data_safety_checker(res_vote, tst_user) else 'not '
print('Today\'s data is {not_str}safe.')
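# The three predictors above form a simple voting ensemble: data_safety_checker flags
# today's value as safe only when the majority of forecasts do not exceed it and land
# within 0.1 of it in absolute value.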
| 25 |
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowercase_ = imread(R'digital_image_processing/image_data/lena_small.jpg')
lowercase_ = cvtColor(img, COLOR_BGR2GRAY)
def lowerCAmelCase ( ):
"""simple docstring"""
__A = cn.convert_to_negative(__UpperCamelCase )
# assert negative_img array for at least one True
assert negative_img.any()
def lowerCAmelCase ( ):
"""simple docstring"""
with Image.open('''digital_image_processing/image_data/lena_small.jpg''' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(__UpperCamelCase , 1_1_0 ) ).startswith(
'''<PIL.Image.Image image mode=RGB size=100x100 at''' )
def lowerCAmelCase ( ):
"""simple docstring"""
__A = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def lowerCAmelCase ( ):
"""simple docstring"""
__A = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
__A = canny.canny(__UpperCamelCase )
# assert canny array for at least one True
assert canny_array.any()
def lowerCAmelCase ( ):
"""simple docstring"""
assert gg.gaussian_filter(__UpperCamelCase , 5 , sigma=0.9 ).all()
def lowerCAmelCase ( ):
"""simple docstring"""
__A = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
__A = conv.img_convolve(__UpperCamelCase , __UpperCamelCase ).astype(__UpperCamelCase )
assert res.any()
def lowerCAmelCase ( ):
"""simple docstring"""
assert med.median_filter(__UpperCamelCase , 3 ).any()
def lowerCAmelCase ( ):
"""simple docstring"""
__A , __A = sob.sobel_filter(__UpperCamelCase )
assert grad.any() and theta.any()
def lowerCAmelCase ( ):
"""simple docstring"""
__A = sp.make_sepia(__UpperCamelCase , 2_0 )
assert sepia.all()
def lowerCAmelCase ( __UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" ):
"""simple docstring"""
__A = bs.Burkes(imread(__UpperCamelCase , 1 ) , 1_2_0 )
burkes.process()
assert burkes.output_img.any()
def lowerCAmelCase ( __UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" , ):
"""simple docstring"""
__A = rs.NearestNeighbour(imread(__UpperCamelCase , 1 ) , 4_0_0 , 2_0_0 )
nn.process()
assert nn.output.any()
def lowerCAmelCase ( ):
"""simple docstring"""
__A = '''digital_image_processing/image_data/lena.jpg'''
# Reading the image and converting it to grayscale.
__A = imread(__UpperCamelCase , 0 )
# Test for get_neighbors_pixel function() return not None
__A = 0
__A = 0
__A = image[x_coordinate][y_coordinate]
__A = lbp.get_neighbors_pixel(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
__A = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
__A = lbp.local_binary_value(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
assert lbp_image.any()
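# These tests are written for pytest; a typical run (path is illustrative):
#     python -m pytest digital_image_processing/test_digital_image_processing.py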
| 266 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
| 267 |
def catalan(number: int) -> int:
    """
    Return the `number`-th Catalan number.

    >>> catalan(1)
    1
    >>> catalan(5)
    14
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 267 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}


class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout


class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )
        return inputs
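# Minimal usage sketch (not from the original file); the defaults above give the base layout:
#     config = LayoutLMv3Config()
#     print(config.hidden_size, config.max_2d_position_embeddings)  # 768 1024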
| 214 |
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    """Iterative depth-first search; returns every vertex reachable from `start`."""
    explored, stack = set(), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
| 214 | 1 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint-repo",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
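# Example invocation (script name and output path are illustrative; the repo id comes from the help text above):
#     python convert_roberta_prelayernorm_checkpoint.py \
#         --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#         --pytorch_dump_folder_path ./roberta_prelayernorm_converted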
| 363 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_stable_diffusion_inpaint_legacy(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 343 | 0 |
import math


def is_prime(number: int) -> bool:
    """Return True if `number` is prime."""
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Return the next prime at or after `factor * value` (searching downward if desc=True)."""
    value = factor * value
    first_value_val = value
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
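# Quick check of the helpers above: is_prime(13) is True; next_prime(14) steps upward
# through 15 and 16 to return 17, while next_prime(14, desc=True) steps down to 13.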
| 175 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass


@input.register
class BulletMenu:
    """
    A CLI menu to select a choice from a list of choices using the keyboard.
    """

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        """Prints the choice at the given index."""
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        """Should not be directly called; used to move in a direction of either up or down."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        """Start the menu and return the selected choice."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
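# Hedged usage sketch (interactive; the choices shown are illustrative, not from the original file):
#     menu = BulletMenu("Pick a backend:", ["cpu", "cuda", "mps"])
#     chosen_index = menu.run(default_choice=0)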
| 175 | 1 |
"""
A Radix Tree: a space-optimized trie (prefix tree) in which each node that is
an only child is merged with its parent.
"""


class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of an edge to the child node
        self.nodes: dict[str, RadixNode] = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf

        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Compute the common substring of the node's prefix and a word.

        Returns (common substring, remaining prefix, remaining word).
        """
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        """Insert many words into the tree."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a word into the tree."""
        # Case 1: The word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        """Return True if the word is in the tree."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        """Delete a word from the tree; return True if it was present."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True

    def print_tree(self, height: int = 0) -> None:
        """Print each prefix of the tree, indented by depth."""
        if self.prefix != "":
            print("-" * height, self.prefix, "  (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)
    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)
    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
| 17 |
"""Fine-tuning the library models for multiple choice."""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that will dynamically pad the inputs for multiple choice received.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))
        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
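# The collator above flattens each example's `num_choices` candidate sequences into one
# list so `tokenizer.pad` can batch them, then reshapes the padded tensors back to
# (batch_size, num_choices, seq_len). A toy sketch of the reshape (shapes are illustrative):
#     padded = torch.zeros(8, 16)          # 2 examples x 4 choices, seq_len 16
#     per_choice = padded.view(2, 4, -1)   # back to (batch, choices, seq_len)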
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).

    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).

    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase : Any = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
_UpperCAmelCase : str = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path ), config=a_, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
_UpperCAmelCase : Optional[Any] = [f"""ending{i}""" for i in range(4 )]
_UpperCAmelCase : List[Any] = "sent1"
_UpperCAmelCase : Optional[int] = "sent2"
if data_args.max_seq_length is None:
_UpperCAmelCase : List[str] = tokenizer.model_max_length
if max_seq_length > 1_024:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
_UpperCAmelCase : Dict = 1_024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
_UpperCAmelCase : Dict = min(data_args.max_seq_length, tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(a_: Union[str, Any] ):
_UpperCAmelCase : Optional[int] = [[context] * 4 for context in examples[context_name]]
_UpperCAmelCase : Tuple = examples[question_header_name]
_UpperCAmelCase : Optional[Any] = [
[f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(a_ )
]
# Flatten out
_UpperCAmelCase : List[str] = list(chain(*a_ ) )
_UpperCAmelCase : Dict = list(chain(*a_ ) )
# Tokenize
_UpperCAmelCase : List[Any] = tokenizer(
a_, a_, truncation=a_, max_length=a_, padding="max_length" if data_args.pad_to_max_length else False, )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0, len(a_ ), 4 )] for k, v in tokenized_examples.items()}
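# A minimal sketch of the un-flatten step above, assuming a batch of 2
# examples with 4 endings each (integers stand in for tokenized sequences):
#
#   flat = list(range(8))
#   grouped = [flat[i : i + 4] for i in range(0, len(flat), 4)]
#   grouped == [[0, 1, 2, 3], [4, 5, 6, 7]]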
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
_UpperCAmelCase : int = raw_datasets["train"]
if data_args.max_train_samples is not None:
_UpperCAmelCase : Optional[Any] = min(len(a_ ), data_args.max_train_samples )
_UpperCAmelCase : List[Any] = train_dataset.select(range(a_ ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
_UpperCAmelCase : Union[str, Any] = train_dataset.map(
a_, batched=a_, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
_UpperCAmelCase : Dict = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
_UpperCAmelCase : int = min(len(a_ ), data_args.max_eval_samples )
_UpperCAmelCase : List[str] = eval_dataset.select(range(a_ ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
_UpperCAmelCase : Optional[int] = eval_dataset.map(
a_, batched=a_, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
# Data collator
_UpperCAmelCase : Tuple = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=a_, pad_to_multiple_of=8 if training_args.fpaa else None )
)
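# Note: pad_to_multiple_of=8 rounds padded sequence lengths up to multiples of
# 8, which keeps matmul shapes friendly to half-precision kernels; when
# pad_to_max_length is set, every feature already has a fixed length, so the
# plain default_data_collator is enough.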
# Metric
def compute_metrics(a_: Tuple ):
_UpperCAmelCase , _UpperCAmelCase : Tuple = eval_predictions
_UpperCAmelCase : Union[str, Any] = np.argmax(a_, axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
_UpperCAmelCase : Any = Trainer(
model=a_, args=a_, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=a_, data_collator=a_, compute_metrics=a_, )
# Training
if training_args.do_train:
_UpperCAmelCase : Optional[Any] = None
if training_args.resume_from_checkpoint is not None:
_UpperCAmelCase : List[Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_UpperCAmelCase : List[str] = last_checkpoint
_UpperCAmelCase : Any = trainer.train(resume_from_checkpoint=a_ )
trainer.save_model() # Saves the tokenizer too for easy upload
_UpperCAmelCase : str = train_result.metrics
_UpperCAmelCase : List[str] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(a_ )
)
_UpperCAmelCase : Union[str, Any] = min(a_, len(a_ ) )
trainer.log_metrics("train", a_ )
trainer.save_metrics("train", a_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_UpperCAmelCase : List[Any] = trainer.evaluate()
_UpperCAmelCase : int = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(a_ )
_UpperCAmelCase : Tuple = min(a_, len(a_ ) )
trainer.log_metrics("eval", a_ )
trainer.save_metrics("eval", a_ )
_UpperCAmelCase : int = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**a_ )
else:
trainer.create_model_card(**a_ )
def __UpperCAmelCase ( a_: int ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 17 | 1 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
UpperCAmelCase_ : int = '''bart'''
UpperCAmelCase_ : int = True
@st.cache(allow_output_mutation=__magic_name__ )
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]:
"""simple docstring"""
if LOAD_DENSE_INDEX:
UpperCamelCase :Optional[Any] = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
UpperCamelCase :Union[str, Any] = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
UpperCamelCase :Optional[int] = qar_model.eval()
else:
UpperCamelCase , UpperCamelCase :Optional[int] = (None, None)
if MODEL_TYPE == "bart":
UpperCamelCase :Any = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
UpperCamelCase :Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
UpperCamelCase :Optional[int] = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
sas_model.load_state_dict(save_dict["""model"""] )
UpperCamelCase :Optional[Any] = sas_model.eval()
else:
UpperCamelCase , UpperCamelCase :Dict = make_qa_sas_model(
model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=__magic_name__ )
def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]:
"""simple docstring"""
if LOAD_DENSE_INDEX:
UpperCamelCase :Tuple = faiss.StandardGpuResources()
UpperCamelCase :Dict = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""]
UpperCamelCase :str = np.memmap(
"""wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 128) , )
UpperCamelCase :Optional[Any] = faiss.IndexFlatIP(128 )
UpperCamelCase :Optional[int] = faiss.index_cpu_to_gpu(__magic_name__ , 1 , __magic_name__ )
wikiaab_gpu_index_flat.add(__magic_name__ ) # TODO fix for larger GPU
else:
UpperCamelCase , UpperCamelCase :Optional[Any] = (None, None)
UpperCamelCase :List[str] = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=__magic_name__ )
def SCREAMING_SNAKE_CASE_ ( ) -> str:
"""simple docstring"""
UpperCamelCase :Dict = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
UpperCamelCase :List[Any] = elia["""train_eli5"""]
UpperCamelCase :Optional[Any] = np.memmap(
"""eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 128) )
UpperCamelCase :Dict = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(__magic_name__ )
return (elia_train, eli5_train_q_index)
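# A minimal faiss inner-product search sketch (random vectors of dimension
# 128, purely illustrative; not the app's real data):
#
#   import faiss
#   import numpy as np
#   xb = np.random.rand(1000, 128).astype("float32")
#   index = faiss.IndexFlatIP(128)
#   index.add(xb)
#   D, I = index.search(xb[:1], 10)   # scores and ids of the 10 nearest rows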
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = load_indexes()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = load_models()
UpperCAmelCase_ , UpperCAmelCase_ : str = load_train_data()
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : int , __magic_name__ : Any=10 ) -> Any:
"""simple docstring"""
UpperCamelCase :List[str] = embed_questions_for_retrieval([question] , __magic_name__ , __magic_name__ )
UpperCamelCase , UpperCamelCase :int = eli5_train_q_index.search(__magic_name__ , __magic_name__ )
UpperCamelCase :Any = [elia_train[int(__magic_name__ )] for i in I[0]]
return nn_examples
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Union[str, Any] , __magic_name__ : List[str]="wiki40b" , __magic_name__ : str="dense" , __magic_name__ : Tuple=10 ) -> List[str]:
"""simple docstring"""
if source == "none":
UpperCamelCase , UpperCamelCase :Dict = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
UpperCamelCase , UpperCamelCase :List[Any] = query_qa_dense_index(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
else:
UpperCamelCase , UpperCamelCase :List[Any] = query_es_index(
__magic_name__ , __magic_name__ , index_name="""english_wiki40b_snippets_100w""" , n_results=__magic_name__ , )
UpperCamelCase :str = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
UpperCamelCase :Tuple = """question: {} context: {}""".format(__magic_name__ , __magic_name__ )
return question_doc, support_list
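# Illustrative shape of the model input built above (hypothetical values):
#
#   "question: Why is the sky blue? context: <P> first passage <P> second passage"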
@st.cache(
hash_funcs={
torch.Tensor: (lambda __magic_name__ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __magic_name__ : None),
} )
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : Optional[Any]=64 , __magic_name__ : int=256 , __magic_name__ : Dict=False , __magic_name__ : str=2 , __magic_name__ : str=0.95 , __magic_name__ : Dict=0.8 ) -> Dict:
"""simple docstring"""
with torch.no_grad():
UpperCamelCase :Optional[Any] = qa_sas_generate(
__magic_name__ , __magic_name__ , __magic_name__ , num_answers=1 , num_beams=__magic_name__ , min_len=__magic_name__ , max_len=__magic_name__ , do_sample=__magic_name__ , temp=__magic_name__ , top_p=__magic_name__ , top_k=__magic_name__ , max_input_length=1024 , device="""cuda:0""" , )[0]
return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
UpperCAmelCase_ : List[Any] = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
UpperCAmelCase_ : Union[str, Any] = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
UpperCAmelCase_ : List[str] = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
UpperCAmelCase_ : Dict = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
UpperCAmelCase_ : Tuple = st.sidebar.checkbox('''Demo options''')
if demo_options:
UpperCAmelCase_ : str = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
UpperCAmelCase_ : str = action_list.index(action_st)
UpperCAmelCase_ : int = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
UpperCAmelCase_ : str = show_type == '''Show full text of passages'''
else:
UpperCAmelCase_ : str = 3
UpperCAmelCase_ : List[Any] = True
UpperCAmelCase_ : Optional[Any] = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
UpperCAmelCase_ : Any = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between question and passage embeddings
trained on the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
The answer is then generated by a sequence-to-sequence model that takes the question and the retrieved documents as input.
'''
st.sidebar.markdown(retriever_info)
UpperCAmelCase_ : int = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
UpperCAmelCase_ : List[Any] = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
UpperCAmelCase_ : Optional[Any] = '''wiki40b'''
UpperCAmelCase_ : Any = '''dense'''
UpperCAmelCase_ : int = '''beam'''
UpperCAmelCase_ : Optional[int] = 2
UpperCAmelCase_ : Optional[Any] = 64
UpperCAmelCase_ : str = 2_56
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : str = st.sidebar.checkbox('''Generation options''')
if generate_options:
UpperCAmelCase_ : Optional[Any] = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can decode with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
UpperCAmelCase_ : Optional[Any] = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
UpperCAmelCase_ : int = st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
UpperCAmelCase_ : Optional[Any] = st.sidebar.slider(
'''Maximum generation length''', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
UpperCAmelCase_ : Optional[Any] = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
UpperCAmelCase_ : int = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
UpperCAmelCase_ : Tuple = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
UpperCAmelCase_ : Optional[int] = None
# start main text
UpperCAmelCase_ : Optional[int] = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
UpperCAmelCase_ : List[Any] = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
UpperCAmelCase_ : Union[str, Any] = st.text_input('''Enter your question here:''', '''''')
else:
UpperCAmelCase_ : Optional[Any] = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
UpperCAmelCase_ , UpperCAmelCase_ : Dict = make_support(question, source=wiki_source, method='''dense''', n_results=10)
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
UpperCAmelCase_ : Tuple = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
UpperCAmelCase_ : Any = support_list[:10]
UpperCAmelCase_ : Optional[Any] = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
UpperCAmelCase_ , UpperCAmelCase_ : str = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
UpperCAmelCase_ : Any = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
UpperCAmelCase_ : List[Any] = res[1].strip()
if sec_titles == "":
UpperCAmelCase_ : Union[str, Any] = '''[{}]({})'''.format(res[0], wiki_url)
else:
UpperCAmelCase_ : str = sec_titles.split(''' & ''')
UpperCAmelCase_ : str = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
UpperCAmelCase_ : Any = find_nearest_training(question)
UpperCAmelCase_ : Optional[int] = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
UpperCAmelCase_ : Dict = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
UpperCAmelCase_ : int = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 38 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
UpperCamelCase_ = logging.getLogger(__name__)
torch.set_grad_enabled(False)
UpperCamelCase_ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def lowerCamelCase_ ( _a : str , _a : Any=100 , _a : int=" " ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = text.split(_a )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(_a ) , _a )]
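# Illustration of the chunking above (assuming the default " " separator):
# with n=3, "a b c d e f g" is split into ["a b c", "d e f", "g"] -- the last
# chunk simply keeps whatever words remain.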
def lowerCamelCase_ ( _a : dict ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ : Dict = [], []
for title, text in zip(documents["""title"""] , documents["""text"""] ):
if text is not None:
for passage in split_text(_a ):
titles.append(title if title is not None else """""" )
texts.append(_a )
return {"title": titles, "text": texts}
def lowerCamelCase_ ( _a : dict , _a : DPRContextEncoder , _a : DPRContextEncoderTokenizerFast ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = ctx_tokenizer(
documents["""title"""] , documents["""text"""] , truncation=_a , padding="""longest""" , return_tensors="""pt""" )["""input_ids"""]
UpperCAmelCase_ : Tuple = ctx_encoder(input_ids.to(device=_a ) , return_dict=_a ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def lowerCamelCase_ ( _a : "RagExampleArguments" , _a : "ProcessingArguments" , _a : "IndexHnswArguments" , ):
'''simple docstring'''
logger.info("""Step 1 - Create the dataset""" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
UpperCAmelCase_ : Optional[int] = load_dataset(
"""csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
UpperCAmelCase_ : Tuple = dataset.map(_a , batched=_a , num_proc=processing_args.num_proc )
# And compute the embeddings
UpperCAmelCase_ : List[str] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=_a )
UpperCAmelCase_ : Dict = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
UpperCAmelCase_ : Any = Features(
{"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} ) # optional, save as float32 instead of float64 to save space
UpperCAmelCase_ : List[str] = dataset.map(
partial(_a , ctx_encoder=_a , ctx_tokenizer=_a ) , batched=_a , batch_size=processing_args.batch_size , features=_a , )
# And finally save your dataset
UpperCAmelCase_ : Union[str, Any] = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" )
dataset.save_to_disk(_a )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("""Step 2 - Index the dataset""" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
UpperCAmelCase_ : Union[str, Any] = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("""embeddings""" , custom_index=_a )
# And save the index
UpperCAmelCase_ : Optional[Any] = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" )
dataset.get_index("""embeddings""" ).save(_a )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
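# A minimal standalone HNSW sketch with illustrative parameters (not the
# script's real data):
#
#   import faiss
#   import numpy as np
#   d, m = 768, 128
#   index = faiss.IndexHNSWFlat(d, m, faiss.METRIC_INNER_PRODUCT)
#   index.add(np.random.rand(100, d).astype("float32"))
#   scores, ids = index.search(np.random.rand(1, d).astype("float32"), 5)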
@dataclass
class _snake_case :
'''simple docstring'''
A__ : str = field(
default=str(Path(__snake_case ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , )
A__ : Optional[str] = field(
default=__snake_case , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , )
A__ : str = field(
default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , )
A__ : str = field(
default="facebook/dpr-ctx_encoder-multiset-base" , metadata={
"help": (
"The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
" 'facebook/dpr-ctx_encoder-multiset-base'"
)
} , )
A__ : Optional[str] = field(
default=str(Path(__snake_case ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , )
@dataclass
class _snake_case :
'''simple docstring'''
A__ : Optional[int] = field(
default=__snake_case , metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
} , )
A__ : int = field(
default=16 , metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
} , )
@dataclass
class _snake_case :
'''simple docstring'''
A__ : int = field(
default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , )
A__ : int = field(
default=128 , metadata={
"help": (
"The number of bi-directional links created for every new element during the HNSW index construction."
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
UpperCamelCase_ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
UpperCamelCase_ = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 345 | 0 |
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = math.inf , SCREAMING_SNAKE_CASE = -math.inf , SCREAMING_SNAKE_CASE = math.inf , SCREAMING_SNAKE_CASE = -math.inf , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = 100 , SCREAMING_SNAKE_CASE = 0.01 , SCREAMING_SNAKE_CASE = 1 , ):
'''simple docstring'''
__UpperCamelCase :Optional[int] = False
__UpperCamelCase :Tuple = search_prob
__UpperCamelCase :int = start_temperate
__UpperCamelCase :Any = []
__UpperCamelCase :Union[str, Any] = 0
__UpperCamelCase :Tuple = None
while not search_end:
__UpperCamelCase :Any = current_state.score()
if best_state is None or current_score > best_state.score():
__UpperCamelCase :str = current_state
scores.append(SCREAMING_SNAKE_CASE )
iterations += 1
__UpperCamelCase :Dict = None
__UpperCamelCase :Union[str, Any] = current_state.get_neighbors()
while (
next_state is None and neighbors
): # until we find a neighbor we can move to (or run out of candidates)
__UpperCamelCase :Optional[int] = random.randint(0 , len(SCREAMING_SNAKE_CASE ) - 1 ) # picking a random neighbor
__UpperCamelCase :Optional[Any] = neighbors.pop(SCREAMING_SNAKE_CASE )
__UpperCamelCase :List[Any] = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
__UpperCamelCase :Any = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
__UpperCamelCase :Dict = picked_neighbor
else:
__UpperCamelCase :Tuple = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
__UpperCamelCase :Dict = picked_neighbor
__UpperCamelCase :Tuple = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
__UpperCamelCase :Tuple = True
else:
__UpperCamelCase :int = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
plt.xlabel('''Iterations''' )
plt.ylabel('''Function values''' )
plt.show()
return best_state
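# Illustration of the acceptance rule above: a worsening move with
# change = -2.0 is accepted with probability e ** (-2.0 / 100) ~= 0.98 at
# temperature 100, but only e ** (-2.0 / 1) ~= 0.135 at temperature 1 --
# cooling makes the search increasingly greedy.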
if __name__ == "__main__":
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
__lowercase = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
__lowercase = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
F'and 50 > y > - 5 found via simulated annealing: {local_min.score()}'
)
# starting the problem with initial coordinates (12, 47)
__lowercase = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
__lowercase = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
F'and 50 > y > - 5 found via simulated annealing: {local_min.score()}'
)
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return (3 * x**2) - (6 * y)
__lowercase = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
__lowercase = simulated_annealing(prob, find_max=False, visualization=True)
print(
'''The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '''
F'{local_min.score()}'
)
__lowercase = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
__lowercase = simulated_annealing(prob, find_max=True, visualization=True)
print(
'''The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
F'{local_min.score()}'
)
| 358 | import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
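# These tests exercise Auto-API dispatch: AutoImageProcessor resolves a
# concrete class from `preprocessor_config.json` ("image_processor_type") or,
# as a fallback, from the feature-extractor key / model config. A minimal
# happy-path sketch (mirroring the first test below):
#
#   processor = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   assert isinstance(processor, CLIPImageProcessor)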
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self) -> int:
__UpperCamelCase :str = 0
def UpperCamelCase__ ( self) -> Optional[Any]:
__UpperCamelCase :Dict = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''')
self.assertIsInstance(__lowercase , __lowercase)
def UpperCamelCase__ ( self) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase :int = Path(__lowercase) / '''preprocessor_config.json'''
__UpperCamelCase :Dict = Path(__lowercase) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowercase , '''w''') , )
json.dump({'''model_type''': '''clip'''} , open(__lowercase , '''w'''))
__UpperCamelCase :Union[str, Any] = AutoImageProcessor.from_pretrained(__lowercase)
self.assertIsInstance(__lowercase , __lowercase)
def UpperCamelCase__ ( self) -> Union[str, Any]:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase :str = Path(__lowercase) / '''preprocessor_config.json'''
__UpperCamelCase :Union[str, Any] = Path(__lowercase) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowercase , '''w''') , )
json.dump({'''model_type''': '''clip'''} , open(__lowercase , '''w'''))
__UpperCamelCase :Dict = AutoImageProcessor.from_pretrained(__lowercase)
self.assertIsInstance(__lowercase , __lowercase)
def UpperCamelCase__ ( self) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase :int = CLIPConfig()
# Create a dummy config file with image_processor_type
__UpperCamelCase :Tuple = Path(__lowercase) / '''preprocessor_config.json'''
__UpperCamelCase :Optional[Any] = Path(__lowercase) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowercase , '''w''') , )
json.dump({'''model_type''': '''clip'''} , open(__lowercase , '''w'''))
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
__UpperCamelCase :Optional[Any] = AutoImageProcessor.from_pretrained(__lowercase).to_dict()
config_dict.pop('''image_processor_type''')
__UpperCamelCase :List[str] = CLIPImageProcessor(**__lowercase)
# save in new folder
model_config.save_pretrained(__lowercase)
config.save_pretrained(__lowercase)
__UpperCamelCase :Dict = AutoImageProcessor.from_pretrained(__lowercase)
# make sure private variable is not incorrectly saved
__UpperCamelCase :Union[str, Any] = json.loads(config.to_json_string())
self.assertTrue('''_processor_class''' not in dict_as_saved)
self.assertIsInstance(__lowercase , __lowercase)
def UpperCamelCase__ ( self) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase :Tuple = Path(__lowercase) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowercase , '''w''') , )
__UpperCamelCase :List[str] = AutoImageProcessor.from_pretrained(__lowercase)
self.assertIsInstance(__lowercase , __lowercase)
def UpperCamelCase__ ( self) -> Optional[int]:
with self.assertRaisesRegex(
__lowercase , '''clip-base is not a local folder and is not a valid model identifier'''):
__UpperCamelCase :Optional[Any] = AutoImageProcessor.from_pretrained('''clip-base''')
def UpperCamelCase__ ( self) -> List[Any]:
with self.assertRaisesRegex(
__lowercase , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'''):
__UpperCamelCase :str = AutoImageProcessor.from_pretrained(__lowercase , revision='''aaaaaa''')
def UpperCamelCase__ ( self) -> List[str]:
with self.assertRaisesRegex(
__lowercase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
__UpperCamelCase :Optional[Any] = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''')
def UpperCamelCase__ ( self) -> str:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__lowercase):
__UpperCamelCase :Dict = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''')
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowercase):
__UpperCamelCase :List[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__lowercase)
__UpperCamelCase :Optional[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__lowercase)
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''')
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__lowercase)
__UpperCamelCase :List[Any] = AutoImageProcessor.from_pretrained(__lowercase , trust_remote_code=__lowercase)
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''')
def UpperCamelCase__ ( self) -> Optional[Any]:
try:
AutoConfig.register('''custom''' , __lowercase)
AutoImageProcessor.register(__lowercase , __lowercase)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowercase):
AutoImageProcessor.register(__lowercase , __lowercase)
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase :int = Path(__lowercase) / '''preprocessor_config.json'''
__UpperCamelCase :List[str] = Path(__lowercase) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowercase , '''w''') , )
json.dump({'''model_type''': '''clip'''} , open(__lowercase , '''w'''))
__UpperCamelCase :int = CustomImageProcessor.from_pretrained(__lowercase)
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__lowercase)
__UpperCamelCase :int = AutoImageProcessor.from_pretrained(__lowercase)
self.assertIsInstance(__lowercase , __lowercase)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCamelCase__ ( self) -> List[Any]:
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : List[str] = True
try:
AutoConfig.register('''custom''' , __lowercase)
AutoImageProcessor.register(__lowercase , __lowercase)
# If remote code is not set, the default is to use local
__UpperCamelCase :str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''')
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''')
self.assertTrue(image_processor.is_local)
# If remote code is disabled, we load the local one.
__UpperCamelCase :Optional[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__lowercase)
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''')
self.assertTrue(image_processor.is_local)
# If remote is enabled, we load from the Hub
__UpperCamelCase :List[str] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__lowercase)
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''')
self.assertTrue(not hasattr(__lowercase , '''is_local'''))
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 105 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase :Any = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Union[str, Any] = ['''XLMRobertaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :List[str] = ['''XLMRobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Optional[Any] = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :List[Any] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Any = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
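# A minimal standalone sketch of the optional-dependency idea used throughout
# this file (the flag name is hypothetical, for illustration only):
#
#   try:
#       import torch  # noqa: F401
#       _torch_available = True
#   except ImportError:
#       _torch_available = False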
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
lowerCAmelCase :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 331 |
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
lowerCAmelCase :List[str] = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
lowerCAmelCase :List[Any] = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
lowerCAmelCase :Union[str, Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :List[Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :Tuple = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
lowerCAmelCase :Optional[Any] = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :Optional[int] = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
lowerCAmelCase :Tuple = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :Union[str, Any] = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
lowerCAmelCase :Dict = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
lowerCAmelCase :Optional[Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :int = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
lowerCAmelCase :int = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
lowerCAmelCase :List[str] = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
lowerCAmelCase :List[Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
lowerCAmelCase :List[str] = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
lowerCAmelCase :int = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
lowerCAmelCase :Tuple = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :Optional[Any] = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
lowerCAmelCase :Any = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
lowerCAmelCase :Tuple = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :str = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
lowerCAmelCase :Any = ''''''
lowerCAmelCase :Any = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
lowerCAmelCase :List[Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :str = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
assert ReadMe.from_string(lowerCAmelCase , lowerCAmelCase ).to_dict() == expected_dict
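# A hedged usage sketch of the API exercised in these tests (variable names
# are illustrative; the schema is the yaml structure defined at the top of
# this file):
#
#   readme = ReadMe.from_string(README_CORRECT, example_yaml_structure)
#   readme.validate()        # raises if the structure is invalid
#   tree = readme.to_dict()  # nested {"name", "text", "is_empty_text", "subsections"} dict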
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def lowerCamelCase ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict ):
"""simple docstring"""
with pytest.raises(lowerCAmelCase , match=re.escape(expected_error.format(path='root' ) ) ):
__magic_name__ : str = ReadMe.from_string(lowerCAmelCase , lowerCAmelCase )
readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase ( lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
with pytest.raises(lowerCAmelCase , match=re.escape(expected_error.format(path='root' ) ) ):
ReadMe.from_string(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase ( lowerCAmelCase : Tuple ):
"""simple docstring"""
ReadMe.from_string(lowerCAmelCase , lowerCAmelCase , suppress_parsing_errors=lowerCAmelCase )
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def lowerCamelCase ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
__magic_name__ : Optional[Any] = Path(lowerCAmelCase ) / 'README.md'
with open(lowerCAmelCase , 'w+' ) as readme_file:
readme_file.write(lowerCAmelCase )
__magic_name__ : Optional[int] = ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : List[Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
__magic_name__ : Union[str, Any] = Path(lowerCAmelCase ) / 'README.md'
with open(lowerCAmelCase , 'w+' ) as readme_file:
readme_file.write(lowerCAmelCase )
__magic_name__ : str = expected_error.format(path=lowerCAmelCase )
with pytest.raises(lowerCAmelCase , match=re.escape(lowerCAmelCase ) ):
__magic_name__ : int = ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase )
readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : str ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
__magic_name__ : Optional[int] = Path(lowerCAmelCase ) / 'README.md'
with open(lowerCAmelCase , 'w+' ) as readme_file:
readme_file.write(lowerCAmelCase )
__magic_name__ : Any = expected_error.format(path=lowerCAmelCase )
with pytest.raises(lowerCAmelCase , match=re.escape(lowerCAmelCase ) ):
ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase ( lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
__magic_name__ : Any = Path(lowerCAmelCase ) / 'README.md'
with open(lowerCAmelCase , 'w+' ) as readme_file:
readme_file.write(lowerCAmelCase )
ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase , suppress_parsing_errors=lowerCAmelCase ) | 331 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
__A : Tuple = logging.get_logger(__name__)
__A : str = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
__A : List[Any] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def read_txt_into_dict( filename ):
    '''simple docstring'''
    result = {}
    with open(filename , '''r''' ) as file:
        for line_number, line in enumerate(file ):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
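# A minimal sketch of the label file read_txt_into_dict expects (file contents
# here are hypothetical, not taken from the original): one label per line,
# keyed by line number, keeping only the first whitespace-separated token.
#     down
#     up extra tokens are ignored
# Such a file would yield {0: 'down', 1: 'up'}, which later serves as the
# model's id2label mapping in the sequence-classification branch below.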
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    '''simple docstring'''
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name = PARAM_MAPPING[full_name.split('''.''' )[-1]]
            weight_type = '''param'''
    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer , weight_type ).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split('''.''' ):
            shape_pointer = getattr(shape_pointer , attribute )
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split('''.''' ):
            hf_pointer = getattr(hf_pointer , attribute )
        hf_pointer.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
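# For illustration (the tensor name is hypothetical): a fairseq weight named
# "encoder.layers.0.self_attn.k_proj.weight" reaches set_recursively with
# key="encoder.layers.0.attention.k_proj" and weight_type="weight", so after
# the shape check above the tensor is copied onto hf_pointer.weight.data.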
def rename_dict( key , value , full_name , weight_type , hf_dict ):
    '''simple docstring'''
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name = PARAM_MAPPING[full_name.split('''.''' )[-1]]
            weight_type = '''param'''
    if weight_type is not None and weight_type != "param":
        full_key = '''.'''.join([key, weight_type] )
    elif weight_type is not None and weight_type == "param":
        full_key = '''.'''.join([key, hf_param_name] )
    else:
        full_key = key
    hf_dict[full_key] = value if '''lm_head''' in full_key else value[0]
PARAM_MAPPING = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def load_wavaveca_layer( name , value , hf_model=None , hf_dict=None ):
    '''simple docstring'''
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key )[0].split('''.''' )[-2]
                mapped_key = mapped_key.replace('''*''' , layer_index )
            if "weight_g" in name:
                weight_type = '''weight_g'''
            elif "weight_v" in name:
                weight_type = '''weight_v'''
            elif "bias" in name:
                weight_type = '''bias'''
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = '''weight'''
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key , value , name , weight_type , hf_dict )
            else:
                set_recursively(hf_model , mapped_key , value , name , weight_type )
            return is_used
    return is_used
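# Example of the "*" substitution above (the name is hypothetical): for
# name="encoder.layers.3.fc1.weight", name.split(key)[0].split(".")[-2]
# recovers the layer index "3", turning the mapped key
# "encoder.layers.*.feed_forward.intermediate_dense" into
# "encoder.layers.3.feed_forward.intermediate_dense".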
def recursively_load_weights( fairseq_model , hf_model , is_headless ):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name , value , hf_model )
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'Unused weights: {unused_weights}' )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    '''simple docstring'''
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
    else:
        unused_weights.append(full_name )
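# For a checkpoint key such as "conv_layers.0.0.weight" (hypothetical),
# layer_id=0 and type_id=0, so the tensor lands in
# feature_extractor.conv_layers[0].conv.weight; type_id == 2 entries are routed
# to that layer's layer norm instead, and anything else is reported as unused.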
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True , is_seq_class=False ):
    '''simple docstring'''
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path )
    else:
        config = WavaVecaConfig()
    if is_seq_class:
        id2label = read_txt_into_dict(dict_path )
        config.id2label = id2label
        hf_wavavec = WavaVecaForSequenceClassification(config )
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , '''vocab.json''' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict['''<pad>'''] = 0
            vocab_dict['''<s>'''] = 1
            with open(vocab_path , '''w''' , encoding='''utf-8''' ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wavavec = WavaVecaForCTC(config )
    else:
        hf_wavavec = WavaVecaForPreTraining(config )
    if is_finetuned or is_seq_class:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    else:
        task_arg = argparse.Namespace(task='''audio_pretraining''' )
        task = fairseq.tasks.setup_task(task_arg )
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=task )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec , not is_finetuned )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
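# A hypothetical invocation of this conversion script (the script file name and
# all paths below are placeholders, not taken from the original):
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path ./wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base \
#       --not_finetuned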
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def UpperCamelCase_ ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = ArgumentParser("Transformers CLI tool" , usage="transformers-cli <command> [<args>]" )
_UpperCAmelCase : List[str] = parser.add_subparsers(help="transformers-cli command helpers" )
# Register commands
ConvertCommand.register_subcommand(_UpperCAmelCase )
DownloadCommand.register_subcommand(_UpperCAmelCase )
EnvironmentCommand.register_subcommand(_UpperCAmelCase )
RunCommand.register_subcommand(_UpperCAmelCase )
ServeCommand.register_subcommand(_UpperCAmelCase )
UserCommands.register_subcommand(_UpperCAmelCase )
AddNewModelCommand.register_subcommand(_UpperCAmelCase )
AddNewModelLikeCommand.register_subcommand(_UpperCAmelCase )
LfsCommands.register_subcommand(_UpperCAmelCase )
PTtoTFCommand.register_subcommand(_UpperCAmelCase )
# Let's go
_UpperCAmelCase : Union[str, Any] = parser.parse_args()
if not hasattr(_UpperCAmelCase , "func" ):
parser.print_help()
exit(1 )
# Run
_UpperCAmelCase : List[Any] = args.func(_UpperCAmelCase )
service.run()
if __name__ == "__main__":
main()
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()
    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }
    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }
    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"
    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)
    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)
    if has_file(args.repo_path, "config.json"):
        model = UNetaDModel(**config)
    else:
        class_name = UNetaDConditionModel if "ldm-text2im-large-256" in args.repo_path else UNetaDModel
        model = class_name(**config)
    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))
    config = dict(model.config)
    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]
        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]
    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))
        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value
        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
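    # Note (summary of the branches above, following this reconstruction): only
    # one of the three module-level flags is typically enabled per run.
    # do_only_config strips renamed keys from config.json, do_only_renaming
    # rewrites old config keys to their new names, and do_only_weights re-keys
    # the checkpoint's state dict to the new parameter names.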
from math import factorial
def solution(n = 2_0 ) -> int:
    n = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n ) / (factorial(k ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
            n = int(sys.argv[1])
            print(solution(n))
except ValueError:
            print('Invalid entry - please enter a number.')
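# Sanity check for the solution above (values follow from the formula):
# solution(1) == 2 and solution(20) == 137846528820, the central binomial
# coefficient C(2n, n) that counts lattice paths through an n x n grid
# (Project Euler problem 15).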
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Tuple = logging.get_logger(__name__)
A_ : Dict = {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/config.json',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class _lowerCAmelCase( PretrainedConfig ):
"""simple docstring"""
    model_type = '''xglm'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''num_attention_heads''': '''attention_heads''',
        '''hidden_size''': '''d_model''',
        '''num_hidden_layers''': '''num_layers''',
    }
    def __init__( self , vocab_size=2_5_6_0_0_8 , max_position_embeddings=2_0_4_8 , d_model=1_0_2_4 , ffn_dim=4_0_9_6 , num_layers=2_4 , attention_heads=1_6 , activation_function="gelu" , dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , layerdrop=0.0 , init_std=0.0_2 , scale_embedding=True , use_cache=True , decoder_start_token_id=2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
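# For illustration, instantiating the config above with no arguments resolves
# hidden_size, num_attention_heads and num_hidden_layers through attribute_map
# to d_model=1024, attention_heads=16 and num_layers=24 respectively.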
"""simple docstring"""
from __future__ import annotations
import requests
valid_terms = set(
'''approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports'''.split()
)
def get_subreddit_data( subreddit : str , limit : int = 1 , age : str = "new" , wanted_data : list | None = None ):
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data ) - valid_terms ) ):
        msg = F'''Invalid search term: {invalid_search_terms}'''
        raise ValueError(msg )
    response = requests.get(
        F'''https://reddit.com/r/{subreddit}/{age}.json?limit={limit}''' , headers={'''User-agent''': '''A random string'''} , )
    if response.status_code == 4_2_9:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit )}
    data_dict = {}
    for id_ in range(limit ):
        data_dict[id_] = {
            item: data['''data''']['''children'''][id_]['''data'''][item] for item in wanted_data
        }
    return data_dict
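# Example shape of the returned mapping when wanted_data is given (all values
# are hypothetical):
#   {0: {"title": "...", "url": "...", "selftext": "..."}, 1: {...}}
# i.e. one entry per post index containing only the requested fields.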
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data('''learnpython''', wanted_data=['''title''', '''url''', '''selftext''']))
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader ( AbstractDatasetInputStream ):
    def __init__( self , sql , con , features = None , cache_dir = None , keep_in_memory = False , **kwargs , ):
        super().__init__(features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , **kwargs )
        self.builder = Sql(
            cache_dir=cache_dir , features=features , sql=sql , con=con , **kwargs , )
    def read( self ):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="""train""" , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class SqlDatasetWriter :
    def __init__( self , dataset , name , con , batch_size = None , num_proc = None , **to_sql_kwargs , ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''' )
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs
    def write( self ):
        _ = self.to_sql_kwargs.pop("""sql""" , None )
        _ = self.to_sql_kwargs.pop("""con""" , None )
        index = self.to_sql_kwargs.pop("""index""" , False )
        written = self._write(index=index , **self.to_sql_kwargs )
        return written
    def _batch_sql( self , args ):
        offset , index , to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data , key=slice(offset , offset + self.batch_size ) , indices=self.dataset._indices , )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name , self.con , index=index , **to_sql_kwargs )
        return num_rows or len(df )
    def _write( self , index , **to_sql_kwargs ):
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
                written += self._batch_sql((offset, index, to_sql_kwargs) )
        else:
            num_rows , batch_size = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , num_rows , batch_size )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
                    written += num_rows
        return written
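# A hypothetical round trip with the reader/writer above (the connection string
# and table names are placeholders):
#   ds = SqlDatasetReader("SELECT * FROM src_table", "sqlite:///data.db").read()
#   SqlDatasetWriter(ds, "dst_table", "sqlite:///data.db", num_proc=2).write()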
"""simple docstring"""
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class a ( TestCase ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = 8
# DPR tok
lowerCAmelCase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCAmelCase = os.path.join(self.tmpdirname , 'dpr_tokenizer' )
os.makedirs(__A , exist_ok=__A )
lowerCAmelCase = os.path.join(__A , DPR_VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
# BART tok
lowerCAmelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowerCAmelCase = dict(zip(__A , range(len(__A ) ) ) )
lowerCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCAmelCase = {'''unk_token''': '''<unk>'''}
lowerCAmelCase = os.path.join(self.tmpdirname , 'bart_tokenizer' )
os.makedirs(__A , exist_ok=__A )
lowerCAmelCase = os.path.join(__A , BART_VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase = os.path.join(__A , BART_VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__A ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__A ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = os.path.join(self.tmpdirname , 'rag_tokenizer' )
lowerCAmelCase = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
lowerCAmelCase = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(__A )
rag_tokenizer.save_pretrained(__A )
lowerCAmelCase = RagTokenizer.from_pretrained(__A , config=__A )
self.assertIsInstance(new_rag_tokenizer.question_encoder , __A )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , __A )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = RagTokenizer.from_pretrained('facebook/rag-token-nq' )
lowerCAmelCase = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
lowerCAmelCase = tokenizer(__A )
self.assertIsNotNone(__A )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = RagTokenizer.from_pretrained('facebook/rag-sequence-nq' )
lowerCAmelCase = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
lowerCAmelCase = tokenizer(__A )
self.assertIsNotNone(__A )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''],
'''processing_layoutlmv2''': ['''LayoutLMv2Processor'''],
'''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_layoutlmv2_fast'''] = ['''LayoutLMv2TokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_layoutlmv2'''] = ['''LayoutLMv2FeatureExtractor''']
    _import_structure['''image_processing_layoutlmv2'''] = ['''LayoutLMv2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_layoutlmv2'''] = [
'''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv2ForQuestionAnswering''',
'''LayoutLMv2ForSequenceClassification''',
'''LayoutLMv2ForTokenClassification''',
'''LayoutLMv2Layer''',
'''LayoutLMv2Model''',
'''LayoutLMv2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text):
    """simple docstring"""
    single_char_strings , two_char_strings = analyze_text(text)
    my_alphas = list(''' ''' + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob) # entropy formula.
    # print entropy
    print(F'''{round(-1 * my_fir_sum):.1f}''')
    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
    # print second entropy
    print(F'''{round(-1 * my_sec_sum):.1f}''')
    # print the difference between them
    print(F'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}''')
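# The quantities printed above are Shannon entropy estimates,
#     H = -sum_x p(x) * log2(p(x)),
# computed over single characters and over adjacent character pairs; the final
# print is the difference H(pairs) - H(singles), a first-order conditional
# entropy estimate for the text.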
def analyze_text(text):
    """simple docstring"""
    single_char_strings = Counter() # type: ignore
    two_char_strings = Counter() # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0 , len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main():
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
},
"spm_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_config_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/m2m100_418M": 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
"m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
"wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
class __A ( PreTrainedTokenizer ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__(self , A , A , A=None , A=None , A="<s>" , A="</s>" , A="</s>" , A="<pad>" , A="<unk>" , A="m2m100" , A = None , A=8 , **A , ) -> None:
"""simple docstring"""
_a = {} if sp_model_kwargs is None else sp_model_kwargs
_a = language_codes
_a = FAIRSEQ_LANGUAGE_CODES[language_codes]
_a = {lang_code: f'''__{lang_code}__''' for lang_code in fairseq_language_code}
_a = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(A )
for lang_code in fairseq_language_code
if self.get_lang_token(A ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=A , tgt_lang=A , bos_token=A , eos_token=A , sep_token=A , unk_token=A , pad_token=A , language_codes=A , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=A , **A , )
_a = vocab_file
_a = load_json(A )
_a = {v: k for k, v in self.encoder.items()}
_a = spm_file
_a = load_spm(A , self.sp_model_kwargs )
_a = len(self.encoder )
_a = {
self.get_lang_token(A ): self.encoder_size + i for i, lang_code in enumerate(A )
}
_a = {lang_code: self.encoder_size + i for i, lang_code in enumerate(A )}
_a = {v: k for k, v in self.lang_token_to_id.items()}
_a = src_lang if src_lang is not None else '''en'''
_a = tgt_lang
_a = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
_a = num_madeup_words
@property
def a__ (self ) -> int:
"""simple docstring"""
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def a__ (self ) -> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def a__ (self , A ) -> None:
"""simple docstring"""
_a = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def a__ (self , A ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(A , out_type=A )
def a__ (self , A ) -> Union[str, Any]:
"""simple docstring"""
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(A , self.encoder[self.unk_token] )
def a__ (self , A ) -> str:
"""simple docstring"""
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(A , self.unk_token )
def a__ (self , A ) -> Dict:
"""simple docstring"""
_a = []
_a = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A ) + token
_a = []
else:
current_sub_tokens.append(A )
out_string += self.sp_model.decode(A )
return out_string.strip()
def a__ (self , A , A = None , A = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
_a = [1] * len(self.prefix_tokens )
_a = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(A )) + suffix_ones
return prefix_ones + ([0] * len(A )) + ([0] * len(A )) + suffix_ones
def a__ (self , A , A = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def a__ (self ) -> Dict:
"""simple docstring"""
_a = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self ) -> Dict:
"""simple docstring"""
_a = self.__dict__.copy()
_a = None
return state
def __setstate__(self , A ) -> None:
"""simple docstring"""
_a = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_a = {}
_a = load_spm(self.spm_file , self.sp_model_kwargs )
def a__ (self , A , A = None ) -> Tuple[str]:
"""simple docstring"""
_a = Path(A )
if not save_dir.is_dir():
raise OSError(f'''{save_directory} should be a directory''' )
_a = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
_a = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , A )
if os.path.abspath(self.spm_file ) != os.path.abspath(A ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , A )
elif not os.path.isfile(self.spm_file ):
with open(A , '''wb''' ) as fi:
_a = self.sp_model.serialized_model_proto()
fi.write(A )
return (str(A ), str(A ))
def a__ (self , A , A = "en" , A = None , A = "ro" , **A , ) -> BatchEncoding:
"""simple docstring"""
_a = src_lang
_a = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(A , A , **A )
def a__ (self , A , A , A , **A ) -> Union[str, Any]:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
_a = src_lang
_a = self(A , add_special_tokens=A , **A )
_a = self.get_lang_id(A )
_a = tgt_lang_id
return inputs
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
self.set_src_lang_special_tokens(self.src_lang )
def a__ (self ) -> Tuple:
"""simple docstring"""
self.set_tgt_lang_special_tokens(self.tgt_lang )
def a__ (self , A ) -> None:
"""simple docstring"""
_a = self.get_lang_token(A )
_a = self.lang_token_to_id[lang_token]
_a = [self.cur_lang_id]
_a = [self.eos_token_id]
def a__ (self , A ) -> None:
"""simple docstring"""
_a = self.get_lang_token(A )
_a = self.lang_token_to_id[lang_token]
_a = [self.cur_lang_id]
_a = [self.eos_token_id]
def a__ (self , A ) -> str:
"""simple docstring"""
return self.lang_code_to_token[lang]
def a__ (self , A ) -> int:
"""simple docstring"""
_a = self.get_lang_token(A )
return self.lang_token_to_id[lang_token]
def load_spm(path , sp_model_kwargs):
    """simple docstring"""
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm
def load_json(path):
    """simple docstring"""
    with open(path , '''r''') as f:
        return json.load(f)
def save_json(data , path):
    """simple docstring"""
    with open(path , '''w''') as f:
        json.dump(data , f , indent=2)
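# A hypothetical use of this tokenizer for translation preprocessing (the model
# id and language codes are placeholders; upstream this class is published as
# M2M100Tokenizer, which appears under an obfuscated name above):
#   tok = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#   batch = tok("Hello world", return_tensors="pt")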
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase__ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False
def __lowercase ( self : Tuple ):
'''simple docstring'''
super().setUp()
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def __lowercase ( self : Dict ,_a : Union[str, Any] ):
'''simple docstring'''
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
return input_text, output_text
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Any = self.tokenizer_class(self.vocab_file )
_a : str = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_a ,['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) ,[9, 6, 7, 12, 10, 11] )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : str = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) ,['ah', '\u535A', '\u63A8', 'zz'] )
def __lowercase ( self : str ):
'''simple docstring'''
_a : int = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) ,['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] )
def __lowercase ( self : str ):
'''simple docstring'''
_a : List[Any] = BasicTokenizer(do_lower_case=_a ,strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['h\u00E9llo'] )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Optional[Any] = BasicTokenizer(do_lower_case=_a ,strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] )
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : Any = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] )
def __lowercase ( self : int ):
'''simple docstring'''
_a : Tuple = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) ,['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Dict = BasicTokenizer(do_lower_case=_a ,strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Union[str, Any] = BasicTokenizer(do_lower_case=_a ,strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : List[Any] = BasicTokenizer(do_lower_case=_a ,never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) ,['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Dict = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
_a : List[str] = {}
for i, token in enumerate(_a ):
_a : List[str] = i
_a : Any = WordpieceTokenizer(vocab=_a ,unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) ,[] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) ,['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) ,['[UNK]', 'runn', '##ing'] )
@require_torch
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Union[str, Any] = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
_a : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_a : str = [1037, 2146, 2_0423, 2005, 7680, 7849, 3989, 1012, 102]
_a : Union[str, Any] = tokenizer(_a ,padding=_a ,return_tensors='pt' )
self.assertIsInstance(_a ,_a )
_a : Optional[Any] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(_a ,_a )
self.assertEqual((2, 9) ,batch.input_ids.shape )
self.assertEqual((2, 9) ,batch.attention_mask.shape )
def __lowercase ( self : int ):
'''simple docstring'''
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def __lowercase ( self : List[str] ):
'''simple docstring'''
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def __lowercase ( self : str ):
'''simple docstring'''
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
@slow
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : Dict = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
_a : Tuple = tokenizer.encode('sequence builders' ,add_special_tokens=_a )
_a : Union[str, Any] = tokenizer.encode('multi-sequence build' ,add_special_tokens=_a )
_a : int = tokenizer.build_inputs_with_special_tokens(_a )
_a : str = tokenizer.build_inputs_with_special_tokens(_a ,_a )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = \"{COMET}: A Neural Framework for {MT} Evaluation\",
author = \"Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon\",
booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",
month = nov,
year = \"2020\",
address = \"Online\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",
pages = \"2685--2702\",
}
"""
_DESCRIPTION = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""
_KWARGS_DESCRIPTION = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric('comet')
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]
>>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]
>>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results[\"scores\"]])
[0.19, 0.92]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__ ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='https://unbabel.github.io/COMET/html/index.html' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'sources': datasets.Value('string' ,id='sequence' ),
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/Unbabel/COMET'] ,reference_urls=[
'https://github.com/Unbabel/COMET',
'https://www.aclweb.org/anthology/2020.emnlp-main.213/',
'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6',
] ,)
    def _download_and_prepare( self ,dl_manager ):
        '''simple docstring'''
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model('wmt20-comet-da' ) )
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
    def _compute( self ,sources ,predictions ,references ,gpus=None ,progress_bar=False ):
        '''simple docstring'''
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {'src': sources, 'mt': predictions, 'ref': references}
        data = [dict(zip(data ,t ) ) for t in zip(*data.values() )]
        scores , mean_score = self.scorer.predict(data ,gpus=gpus ,progress_bar=progress_bar )
        return {"mean_score": mean_score, "scores": scores}
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _SCREAMING_SNAKE_CASE :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=36 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=1000 , )-> int:
lowerCamelCase_ =parent
lowerCamelCase_ =batch_size
lowerCamelCase_ =num_channels
lowerCamelCase_ =image_size
lowerCamelCase_ =patch_size
lowerCamelCase_ =is_training
lowerCamelCase_ =use_input_mask
lowerCamelCase_ =use_token_type_ids
lowerCamelCase_ =use_labels
lowerCamelCase_ =vocab_size
lowerCamelCase_ =hidden_size
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =intermediate_size
lowerCamelCase_ =hidden_act
lowerCamelCase_ =hidden_dropout_prob
lowerCamelCase_ =attention_probs_dropout_prob
lowerCamelCase_ =max_position_embeddings
lowerCamelCase_ =type_vocab_size
lowerCamelCase_ =type_sequence_label_size
lowerCamelCase_ =initializer_range
lowerCamelCase_ =coordinate_size
lowerCamelCase_ =shape_size
lowerCamelCase_ =num_labels
lowerCamelCase_ =num_choices
lowerCamelCase_ =scope
lowerCamelCase_ =range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
lowerCamelCase_ =text_seq_length
lowerCamelCase_ =(image_size // patch_size) ** 2 + 1
lowerCamelCase_ =self.text_seq_length + self.image_seq_length
def _snake_case ( self )-> Dict:
lowerCamelCase_ =ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox )
lowerCamelCase_ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ =None
if self.use_input_mask:
lowerCamelCase_ =random_attention_mask([self.batch_size, self.text_seq_length] )
lowerCamelCase_ =None
if self.use_token_type_ids:
lowerCamelCase_ =ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
lowerCamelCase_ =None
lowerCamelCase_ =None
if self.use_labels:
lowerCamelCase_ =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ =ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
lowerCamelCase_ =LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Tuple:
lowerCamelCase_ =TFLayoutLMvaModel(config=_SCREAMING_SNAKE_CASE )
# text + image
lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =model(
_SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE , )
lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
lowerCamelCase_ =model({"""pixel_values""": pixel_values} , training=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Union[str, Any]:
lowerCamelCase_ =self.num_labels
lowerCamelCase_ =TFLayoutLMvaForSequenceClassification(config=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =model(
_SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> List[Any]:
lowerCamelCase_ =self.num_labels
lowerCamelCase_ =TFLayoutLMvaForTokenClassification(config=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =model(
_SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Any:
lowerCamelCase_ =2
lowerCamelCase_ =TFLayoutLMvaForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =model(
_SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self )-> Union[str, Any]:
lowerCamelCase_ =self.prepare_config_and_inputs()
((lowerCamelCase_) , (lowerCamelCase_) , (lowerCamelCase_) , (lowerCamelCase_) , (lowerCamelCase_) , (lowerCamelCase_) , (lowerCamelCase_) , (lowerCamelCase_)) =config_and_inputs
lowerCamelCase_ ={
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase):
_UpperCamelCase:Union[str, Any] = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
_UpperCamelCase:str = (
{"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
_UpperCamelCase:List[Any] = False
_UpperCamelCase:List[str] = False
_UpperCamelCase:List[str] = False
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Optional[int]:
return True
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False )-> dict:
lowerCamelCase_ =copy.deepcopy(_SCREAMING_SNAKE_CASE )
if model_class in get_values(_SCREAMING_SNAKE_CASE ):
lowerCamelCase_ ={
k: tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(_SCREAMING_SNAKE_CASE , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(_SCREAMING_SNAKE_CASE ):
lowerCamelCase_ =tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_SCREAMING_SNAKE_CASE ):
lowerCamelCase_ =tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
lowerCamelCase_ =tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_SCREAMING_SNAKE_CASE ):
lowerCamelCase_ =tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_SCREAMING_SNAKE_CASE ):
lowerCamelCase_ =tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def _snake_case ( self )-> Dict:
lowerCamelCase_ =TFLayoutLMvaModelTester(self )
lowerCamelCase_ =ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def _snake_case ( self )-> Any:
self.config_tester.run_common_tests()
def _snake_case ( self )-> Tuple:
lowerCamelCase_ , lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ =model_class(_SCREAMING_SNAKE_CASE )
if getattr(_SCREAMING_SNAKE_CASE , """hf_compute_loss""" , _SCREAMING_SNAKE_CASE ):
# The number of elements in the loss should be the same as the number of elements in the label
lowerCamelCase_ =self._prepare_for_class(inputs_dict.copy() , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=_SCREAMING_SNAKE_CASE )[0]
]
lowerCamelCase_ =added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
lowerCamelCase_ =self._prepare_for_class(inputs_dict.copy() , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =prepared_for_class.pop("""input_ids""" )
lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
lowerCamelCase_ =self._prepare_for_class(inputs_dict.copy() , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =prepared_for_class.pop("""input_ids""" )
if "labels" in prepared_for_class:
lowerCamelCase_ =prepared_for_class["""labels"""].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
lowerCamelCase_ =-100
lowerCamelCase_ =tf.convert_to_tensor(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
lowerCamelCase_ =self._prepare_for_class(inputs_dict.copy() , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
lowerCamelCase_ =self._prepare_for_class(inputs_dict.copy() , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
# Get keys that were added with the _prepare_for_class function
lowerCamelCase_ =prepared_for_class.keys() - inputs_dict.keys()
lowerCamelCase_ =inspect.signature(model.call ).parameters
lowerCamelCase_ =list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
lowerCamelCase_ ={0: """input_ids"""}
for label_key in label_keys:
lowerCamelCase_ =signature_names.index(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =label_key
lowerCamelCase_ =sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
lowerCamelCase_ =[]
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
lowerCamelCase_ =prepared_for_class[value]
lowerCamelCase_ =tuple(_SCREAMING_SNAKE_CASE )
# Send to model
lowerCamelCase_ =model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def _snake_case ( self )-> Tuple:
(
(
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) ,
) =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> Optional[int]:
(
(
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) ,
) =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase_ =type
self.model_tester.create_and_check_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> Any:
(
(
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) ,
) =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> int:
(
(
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) ,
) =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> List[str]:
(
(
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) ,
) =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def _snake_case ( self )-> Optional[Any]:
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ =TFLayoutLMvaModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( ) ->str:
"""simple docstring"""
lowerCamelCase_ =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase):
@cached_property
def _snake_case ( self )-> List[Any]:
return LayoutLMvaImageProcessor(apply_ocr=_SCREAMING_SNAKE_CASE ) if is_vision_available() else None
@slow
def _snake_case ( self )-> List[Any]:
lowerCamelCase_ =TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" )
lowerCamelCase_ =self.default_image_processor
lowerCamelCase_ =prepare_img()
lowerCamelCase_ =image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""tf""" ).pixel_values
lowerCamelCase_ =tf.constant([[1, 2]] )
lowerCamelCase_ =tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
lowerCamelCase_ =model(input_ids=_SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE )
# verify the logits
lowerCamelCase_ =(1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =tf.constant(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
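        # For reference, the bbox fed above follows LayoutLM's convention: one
        # (x0, y0, x1, y1) box per text token on a 0-1000 normalized grid (the
        # `range_bbox` ceiling in the tester); the coordinates used here are
        # illustrative fixtures, not taken from a real document.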
| 154 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """simple docstring"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs(token):
    """simple docstring"""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """simple docstring"""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # `worflow_run_id` (sic) matches the upstream helper's signature.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )
def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """simple docstring"""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
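if __name__ == "__main__":
    # Hedged usage sketch: pull the latest daily-CI report artifacts using a
    # GitHub token taken from the environment. The artifact name below is an
    # assumption for illustration; real names depend on the workflow.
    reports = get_last_daily_ci_reports(
        artifact_names=["run_all_tests_gpu_test_reports"],
        output_dir=".",
        token=os.environ.get("GITHUB_TOKEN"),
    )
    print(sorted(reports))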
| 154 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def list_truncated_nums(n: int) -> list[int]:
    '''simple docstring'''
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums
def validate(n: int) -> bool:
    '''simple docstring'''
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True
def compute_truncated_primes(count: int = 11) -> list[int]:
    '''simple docstring'''
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes
def solution() -> int:
    '''simple docstring'''
    return sum(compute_truncated_primes(11))
if __name__ == "__main__":
print(F"{sum(compute_truncated_primes(11)) = }")
| 359 |
'''simple docstring'''
def bfs(graph: list, source: int, sink: int, parent: list) -> bool:
    '''simple docstring'''
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]
def ford_fulkerson(graph: list, source: int, sink: int) -> int:
    '''simple docstring'''
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
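# Hedged sanity check on a second, illustrative 4-node network (not from the
# original file): one augmenting path carries 2 units through node 1 and one
# carries 2 through node 2, so the maximum flow is 4.
tiny_graph = [
    [0, 3, 2, 0],
    [0, 0, 0, 2],
    [0, 0, 0, 3],
    [0, 0, 0, 0],
]
assert ford_fulkerson(tiny_graph, 0, 3) == 4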
| 156 | 0 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    '''simple docstring'''
    url = f'''https://www.amazon.in/laptop/s?k={product}'''
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url , headers=header ).text )
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span" , attrs={"class": "a-offscreen"} ).text
            try:
                product_rating = item.find("span" , attrs={"class": "a-icon-alt"} ).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹" ).replace("," , "" ) )
                            - float(product_price.strip("₹" ).replace("," , "" ) )
                        )
                        / float(product_mrp.strip("₹" ).replace("," , "" ) )
                    )
                    * 100 )
            except ValueError:
                discount = float("nan" )
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index )] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
_lowerCAmelCase = " "
_lowerCAmelCase = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = "headphones"
get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
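    # Worked example of the discount arithmetic above (illustrative numbers):
    # an MRP of 1000 and a current price of 750 give
    # (1000 - 750) / 1000 * 100 = 25.0, i.e. a 25% discount.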
| 158 |
"""simple docstring"""
from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float, ):
"""simple docstring"""
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif stress < 0:
raise ValueError('''Stress cannot be negative''' )
elif tangential_force < 0:
raise ValueError('''Tangential Force cannot be negative''' )
elif area < 0:
raise ValueError('''Area cannot be negative''' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
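# Worked example under tau = F / A for the function above: passing stress as
# the single zero (unknown) argument, shear_stress(0, 100, 2) returns
# ("stress", 50.0), i.e. a 100 N tangential force over 2 m^2.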
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 320 | 0
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowercase__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = IFImgaImgSuperResolutionPipeline
UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
UpperCamelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
def lowercase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
return self._get_superresolution_dummy_components()
def lowercase__ ( self : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : List[Any]=0 ) -> Optional[Any]:
'''simple docstring'''
if str(_UpperCAmelCase ).startswith("mps" ):
UpperCAmelCase_ = torch.manual_seed(_UpperCAmelCase )
else:
UpperCAmelCase_ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
UpperCAmelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
UpperCAmelCase_ = floats_tensor((1, 3, 16, 16) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
UpperCAmelCase_ = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowercase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def lowercase__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def lowercase__ ( self : int ) -> str:
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowercase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowercase__ ( self : int ) -> Tuple:
'''simple docstring'''
self._test_save_load_local()
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 241 |
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint
    new_checkpoint = {}
UpperCAmelCase_ = vae_state_dict["encoder.conv_in.weight"]
UpperCAmelCase_ = vae_state_dict["encoder.conv_in.bias"]
UpperCAmelCase_ = vae_state_dict["encoder.conv_out.weight"]
UpperCAmelCase_ = vae_state_dict["encoder.conv_out.bias"]
UpperCAmelCase_ = vae_state_dict["encoder.norm_out.weight"]
UpperCAmelCase_ = vae_state_dict["encoder.norm_out.bias"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_in.weight"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_in.bias"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_out.weight"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_out.bias"]
UpperCAmelCase_ = vae_state_dict["decoder.norm_out.weight"]
UpperCAmelCase_ = vae_state_dict["decoder.norm_out.bias"]
UpperCAmelCase_ = vae_state_dict["quant_conv.weight"]
UpperCAmelCase_ = vae_state_dict["quant_conv.bias"]
UpperCAmelCase_ = vae_state_dict["post_quant_conv.weight"]
UpperCAmelCase_ = vae_state_dict["post_quant_conv.bias"]
# Retrieves the keys for the encoder down blocks only
UpperCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
UpperCAmelCase_ = {
layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(lowerCAmelCase__ )
}
# Retrieves the keys for the decoder up blocks only
UpperCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
UpperCAmelCase_ = {
layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(lowerCAmelCase__ )
}
for i in range(lowerCAmelCase__ ):
UpperCAmelCase_ = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key]
if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
UpperCAmelCase_ = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.weight""" )
UpperCAmelCase_ = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.bias""" )
UpperCAmelCase_ = renew_vae_resnet_paths(lowerCAmelCase__ )
UpperCAmelCase_ = {"old": f"""down.{i}.block""", "new": f"""down_blocks.{i}.resnets"""}
assign_to_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , additional_replacements=[meta_path] , config=lowerCAmelCase__ )
UpperCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.block" in key]
UpperCAmelCase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
UpperCAmelCase_ = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key]
UpperCAmelCase_ = renew_vae_resnet_paths(lowerCAmelCase__ )
UpperCAmelCase_ = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , additional_replacements=[meta_path] , config=lowerCAmelCase__ )
UpperCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.attn" in key]
UpperCAmelCase_ = renew_vae_attention_paths(lowerCAmelCase__ )
UpperCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , additional_replacements=[meta_path] , config=lowerCAmelCase__ )
conv_attn_to_linear(lowerCAmelCase__ )
for i in range(lowerCAmelCase__ ):
UpperCAmelCase_ = num_up_blocks - 1 - i
UpperCAmelCase_ = [
key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key
]
if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
UpperCAmelCase_ = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.weight"""
]
UpperCAmelCase_ = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.bias"""
]
UpperCAmelCase_ = renew_vae_resnet_paths(lowerCAmelCase__ )
UpperCAmelCase_ = {"old": f"""up.{block_id}.block""", "new": f"""up_blocks.{i}.resnets"""}
assign_to_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , additional_replacements=[meta_path] , config=lowerCAmelCase__ )
UpperCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.block" in key]
UpperCAmelCase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
UpperCAmelCase_ = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key]
UpperCAmelCase_ = renew_vae_resnet_paths(lowerCAmelCase__ )
UpperCAmelCase_ = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , additional_replacements=[meta_path] , config=lowerCAmelCase__ )
UpperCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.attn" in key]
UpperCAmelCase_ = renew_vae_attention_paths(lowerCAmelCase__ )
UpperCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , additional_replacements=[meta_path] , config=lowerCAmelCase__ )
conv_attn_to_linear(lowerCAmelCase__ )
return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path, output_path, ):
    # Only support V1
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
    io_obj = io.BytesIO(r.content )
    original_config = OmegaConf.load(io_obj )
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors" ):
        from safetensors import safe_open
        checkpoint = {}
        with safe_open(checkpoint_path , framework="pt" , device="cpu" ) as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key )
    else:
        checkpoint = torch.load(checkpoint_path , map_location=device )["state_dict"]
    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config , image_size=image_size )
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint , vae_config )
    vae = AutoencoderKL(**vae_config )
    vae.load_state_dict(converted_vae_checkpoint )
    vae.save_pretrained(output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
    args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
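# Hedged usage note: the converter above is meant to be run as a script; the
# file name below is an assumption, only the two flags come from the argparse
# block above.
#   python vae_pt_to_vae_diffuser.py --vae_pt_path ./vae.pt --dump_path ./vae_diffusers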
| 241 | 1 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    """simple docstring"""
    return (data["data"], data["target"])
def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    """simple docstring"""
    xgb = XGBRegressor(verbosity=0 , random_state=42 )
    xgb.fit(features , target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) , 1 )
    return predictions
def main() -> None:
    """simple docstring"""
    housing = fetch_california_housing()
    data , target = data_handling(housing )
    x_train , x_test , y_train , y_test = train_test_split(
        data , target , test_size=0.25 , random_state=1 )
    predictions = xgboost(x_train , y_train , x_test )
    # Error printing
    print(f'''Mean Absolute Error : {mean_absolute_error(y_test , predictions )}''' )
    print(f'''Mean Square Error : {mean_squared_error(y_test , predictions )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
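# Hedged aside: mean_squared_error above reports plain MSE. Inside main, an
# RMSE in the target's own units can be obtained from scikit-learn directly
# (variable names as in main above):
#   rmse = mean_squared_error(y_test, predictions, squared=False)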
| 279 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    parser.add_argument(
        '''--txt2img_unclip''',
        default='''kakaobrain/karlo-v1-alpha''',
        type=str,
        required=False,
        help='''The pretrained txt2img unclip.''',
    )
    args = parser.parse_args()
    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')
    imgaimg = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
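# Hedged usage note (the script file name is an assumption; both flags come
# from the argparse block above, and --txt2img_unclip defaults to
# kakaobrain/karlo-v1-alpha):
#   python convert_unclip_txt2img_to_image_variation.py --dump_path ./unclip-image-variation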
| 279 | 1 |
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
lowerCAmelCase__ = logging.get_logger(__name__)
class lowercase_ :
"""simple docstring"""
    def __init__( self : Optional[Any] ,question_encoder : str ,generator : Tuple ):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : List[str] ):
if os.path.isfile(lowercase__ ):
raise ValueError(F"Provided path ({save_directory}) should be a directory, not a file" )
os.makedirs(lowercase__ ,exist_ok=lowercase__ )
__lowercase = os.path.join(lowercase__ ,'''question_encoder_tokenizer''' )
__lowercase = os.path.join(lowercase__ ,'''generator_tokenizer''' )
self.question_encoder.save_pretrained(lowercase__ )
self.generator.save_pretrained(lowercase__ )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Dict ,lowercase__ : List[str] ,**lowercase__ : Optional[Any] ):
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
__lowercase = kwargs.pop('''config''' ,lowercase__ )
if config is None:
__lowercase = RagConfig.from_pretrained(lowercase__ )
__lowercase = AutoTokenizer.from_pretrained(
lowercase__ ,config=config.question_encoder ,subfolder='''question_encoder_tokenizer''' )
__lowercase = AutoTokenizer.from_pretrained(
lowercase__ ,config=config.generator ,subfolder='''generator_tokenizer''' )
return cls(question_encoder=lowercase__ ,generator=lowercase__ )
def __call__( self : List[Any] ,*lowercase__ : Optional[int] ,**lowercase__ : str ):
return self.current_tokenizer(*lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,*lowercase__ : List[Any] ,**lowercase__ : int ):
return self.generator.batch_decode(*lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,*lowercase__ : Optional[int] ,**lowercase__ : str ):
return self.generator.decode(*lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = self.question_encoder
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.generator
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : List[str] ,lowercase__ : Optional[List[str]] = None ,lowercase__ : Optional[int] = None ,lowercase__ : Optional[int] = None ,lowercase__ : str = "longest" ,lowercase__ : str = None ,lowercase__ : bool = True ,**lowercase__ : int ,):
warnings.warn(
'''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
'''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
'''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
'''details''' ,lowercase__ ,)
if max_length is None:
__lowercase = self.current_tokenizer.model_max_length
__lowercase = self(
lowercase__ ,add_special_tokens=lowercase__ ,return_tensors=lowercase__ ,max_length=lowercase__ ,padding=lowercase__ ,truncation=lowercase__ ,**lowercase__ ,)
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
__lowercase = self.current_tokenizer.model_max_length
__lowercase = self(
text_target=lowercase__ ,add_special_tokens=lowercase__ ,return_tensors=lowercase__ ,padding=lowercase__ ,max_length=lowercase__ ,truncation=lowercase__ ,**lowercase__ ,)
__lowercase = labels['''input_ids''']
return model_inputs
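# Hedged usage sketch of the composite tokenizer above, mirroring
# transformers' RagTokenizer (the checkpoint id is the canonical public RAG
# model; treat it as an assumption if it is unavailable locally):
# from transformers import RagTokenizer
# tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
# batch = tokenizer(["who wrote hamlet?"], return_tensors="pt")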
| 52 |
'''simple docstring'''
import math
def solution(n: int = 100 ):
    """simple docstring"""
    sum_of_squares = sum(i * i for i in range(1 , n + 1 ) )
    square_of_sum = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'{solution() = }')
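    # Worked check: for n = 10 the sum of squares is 385 and the square of the
    # sum is 55 ** 2 = 3025, so the difference below is 2640.
    print(f'{solution(10) = }')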
| 52 | 1 |
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.29_89 * r + 0.58_70 * g + 0.11_40 * b
def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    return (gray > 127) & (gray <= 255)
def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    output = np.zeros_like(image )
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
    # Copy image to padded image (offset reconstructed from the upstream implementation)
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1] ):
        for y in range(image.shape[0] ):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0 )
    return output
if __name__ == "__main__":
# read original image
    lena_path = Path(__file__).resolve().parent / """image_data""" / """lena.jpg"""
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("""RGB""")
pil_img.save("""result_dilation.png""")
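    # Hedged worked example: dilating a single foreground pixel with the
    # cross-shaped structuring element above grows it into a plus sign.
    single = np.zeros((3, 3))
    single[1, 1] = 1
    print(dilation(single, structuring_element))  # rows: 010 / 111 / 010 (as floats)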
| 259 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , ) -> Dict:
UpperCamelCase :Any = parent
UpperCamelCase :Dict = 13
UpperCamelCase :List[Any] = 7
UpperCamelCase :List[Any] = True
UpperCamelCase :Dict = True
UpperCamelCase :Union[str, Any] = True
UpperCamelCase :List[str] = True
UpperCamelCase :Dict = 99
UpperCamelCase :Any = 32
UpperCamelCase :Tuple = 2
UpperCamelCase :Union[str, Any] = 4
UpperCamelCase :List[str] = 37
UpperCamelCase :Dict = '''gelu'''
UpperCamelCase :Dict = 0.1
UpperCamelCase :Tuple = 0.1
UpperCamelCase :Dict = 512
UpperCamelCase :str = 16
UpperCamelCase :Optional[Any] = 2
UpperCamelCase :Dict = 0.02
UpperCamelCase :Optional[int] = 3
UpperCamelCase :int = 4
UpperCamelCase :Dict = None
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase :Optional[int] = None
if self.use_input_mask:
UpperCamelCase :Dict = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase :Dict = None
if self.use_token_type_ids:
UpperCamelCase :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase :Union[str, Any] = None
UpperCamelCase :Optional[int] = None
UpperCamelCase :Any = None
if self.use_labels:
UpperCamelCase :Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase :int = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase :Union[str, Any] = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=SCREAMING_SNAKE_CASE_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
UpperCamelCase :Optional[Any] = TFRoFormerModel(config=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCamelCase :int = [input_ids, input_mask]
UpperCamelCase :List[Any] = model(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :int = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
UpperCamelCase :List[Any] = True
UpperCamelCase :Union[str, Any] = TFRoFormerForCausalLM(config=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCamelCase :Any = model(SCREAMING_SNAKE_CASE_ )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase :str = TFRoFormerForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCamelCase :List[Any] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
UpperCamelCase :List[Any] = self.num_labels
UpperCamelCase :int = TFRoFormerForSequenceClassification(config=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCamelCase :Optional[Any] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
UpperCamelCase :List[Any] = self.num_choices
UpperCamelCase :Any = TFRoFormerForMultipleChoice(config=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase :int = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase :Any = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase :List[Any] = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
UpperCamelCase :Dict = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
UpperCamelCase :Union[str, Any] = self.num_labels
UpperCamelCase :Dict = TFRoFormerForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCamelCase :Tuple = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase :Union[str, Any] = TFRoFormerForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCamelCase :List[Any] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase :Optional[int] = self.prepare_config_and_inputs()
(
(
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) ,
) :Union[str, Any] = config_and_inputs
UpperCamelCase :Any = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( lowercase, lowercase, unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : str =(
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCamelCase_ : Tuple =(
{
'feature-extraction': TFRoFormerModel,
'fill-mask': TFRoFormerForMaskedLM,
'question-answering': TFRoFormerForQuestionAnswering,
'text-classification': TFRoFormerForSequenceClassification,
'text-generation': TFRoFormerForCausalLM,
'token-classification': TFRoFormerForTokenClassification,
'zero-shot': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase_ : Tuple =False
UpperCamelCase_ : Optional[Any] =False
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :Any = TFRoFormerModelTester(self )
UpperCamelCase :Optional[int] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCAmelCase ( self ) -> List[str]:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :Dict = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :Tuple = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
UpperCamelCase :Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCamelCase :str = model(SCREAMING_SNAKE_CASE_ )[0]
# TODO Replace vocab size
UpperCamelCase :Tuple = 5_0000
UpperCamelCase :Optional[Any] = [1, 6, vocab_size]
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
UpperCamelCase :int = tf.constant(
[
[
[-0.1205_3341, -1.026_4901, 0.2922_1946],
[-1.513_3783, 0.19_7433, 0.1519_0607],
[-5.013_5403, -3.90_0256, -0.8403_8764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[int] =1E-4
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :str = tf.constant([[4, 10]] )
UpperCamelCase :List[Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
UpperCamelCase :str = emba(input_ids.shape )
UpperCamelCase :List[str] = tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=self.tolerance )
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :Dict = tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
UpperCamelCase :Dict = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
UpperCamelCase :Any = emba.weight[:3, :5]
tf.debugging.assert_near(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=self.tolerance )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : List[Any] =1E-4
def UpperCAmelCase ( self ) -> List[str]:
# 2,12,16,64
UpperCamelCase :List[Any] = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
UpperCamelCase :List[Any] = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
UpperCamelCase :List[Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
UpperCamelCase :int = embed_positions([2, 16, 768] )[None, None, :, :]
UpperCamelCase , UpperCamelCase :List[str] = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
UpperCamelCase :Optional[int] = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , SCREAMING_SNAKE_CASE_ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , SCREAMING_SNAKE_CASE_ , atol=self.tolerance )
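        # Note on the identity verified above: rotary embeddings rotate each
        # paired feature of the query/key by an angle proportional to the
        # token position (x1' = x1*cos - x2*sin, x2' = x2*cos + x1*sin); the
        # exact feature-pairing convention is implementation-specific, so
        # treat the scalar form as a sketch rather than the library's code.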
| 259 | 1 |
'''simple docstring'''
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = "." )-> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir ):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename )[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path , filename ).lstrip("""./""" )
def md_prefix(i: int )-> str:
    return F"{i * '  '}*" if i else "\n##"
def print_path(old_path: str , new_path: str )-> str:
    old_parts = old_path.split(os.sep )
    for i, new_part in enumerate(new_path.split(os.sep ) ):
        if (i + 1 > len(old_parts ) or old_parts[i] != new_part) and new_part:
            print(F"{md_prefix(i )} {new_part.replace('_' , ' ' ).title()}" )
    return new_path
def print_directory_md(top_dir: str = "." )-> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir ) ):
        filepath, filename = os.path.split(filepath )
        if filepath != old_path:
            old_path = print_path(old_path , filepath )
        indent = (filepath.count(os.sep ) + 1) if filepath else 0
        url = F"{filepath}/{filename}".replace(""" """ , """%20""" )
        filename = os.path.splitext(filename.replace("""_""" , """ """ ).title() )[0]
        print(F"{md_prefix(indent )} [{filename}]({url})" )
if __name__ == "__main__":
print_directory_md('.')
| 370 |
'''simple docstring'''
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """
    Change the brightness of a PIL Image to a given level.
    """

    def brightness(c: int) -> float:
        """
        Fundamental transformation applied to every pixel value.
        """
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
| 183 | 0 |
import string
def decrypt(message: str) -> None:
    """
    Brute-force a Caesar cipher by printing the decryption under every
    possible key.
    """
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 127 |
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)


class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        # run a single encoder layer instead of the full stack
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states


@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0

        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)

                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res


@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    # later (deeper) classifiers get a larger weight in the loss
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
| 205 | 0 |
"""simple docstring"""
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
| 366 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
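# Design note on the pattern above: at import time the module only builds the
# _import_structure name map; _LazyModule then defers the heavy torch /
# sentencepiece imports until an attribute such as PLBartModel is first accessed.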
| 314 | 0 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Visual-Attention-Network/van-base': (
'https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'
),
}
class VanConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of a VAN model."""

    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 83 |
'''simple docstring'''
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 83 | 1 |
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class TapasConfig(PretrainedConfig):
    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
| 369 |
def dodecahedron_surface_area(edge: float) -> float:
    """
    Calculates the surface area of a regular dodecahedron:
    a = 3 * sqrt(25 + 10 * sqrt(5)) * e**2, where e is the edge length.
    """
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError("Length must be a positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """
    Calculates the volume of a regular dodecahedron:
    v = ((15 + 7 * sqrt(5)) / 4) * e**3, where e is the edge length.
    """
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError("Length must be a positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
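# Worked example (values rounded): for edge = 5, the surface area is
# 3 * sqrt(25 + 10 * sqrt(5)) * 5**2 ≈ 516.14 and the volume is
# ((15 + 7 * sqrt(5)) / 4) * 5**3 ≈ 957.89.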
if __name__ == "__main__":
import doctest
doctest.testmod()
| 216 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class EfficientNetConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of an EfficientNet model."""

    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
| 106 |
"""simple docstring"""
import random
def _partition(data, pivot):
    """
    Three-way partition of data into smaller, equal and greater lists
    relative to the pivot.
    """
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items, index):
    """
    Return the element that would sit at position `index` if `items` were
    sorted, in expected O(n) time.
    """
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
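# Minimal usage sketch (not part of the original module): the result is
# independent of the random pivot choice.
#   quick_select([2, 4, 5, 7, 899, 54, 32], 3)  # 4th smallest element -> 7
#   quick_select([2, 4, 5, 7, 899, 54, 32], 5)  # 6th smallest element -> 54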
| 106 | 1 |
import os
import pytest
from attr import dataclass
UpperCamelCase__ : int = """us-east-1""" # defaults region
@dataclass
class lowerCamelCase_ :
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
SCREAMING_SNAKE_CASE_ = {
'task_name': 'mnli',
'per_device_train_batch_size': 16,
'per_device_eval_batch_size': 16,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 5_00,
'save_steps': 55_00,
}
SCREAMING_SNAKE_CASE_ = {**hyperparameters, 'max_steps': 10_00}
@property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
'''simple docstring'''
return F"""{self.framework}-transfromers-test"""
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
return F"""./tests/sagemaker/scripts/{self.framework}"""
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='''class''' )
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Optional[int]:
"""simple docstring"""
a = SageMakerTestEnvironment(framework=request.cls.framework )
| 330 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq model's weights to the transformers design.
    """
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
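# Example invocation (all paths below are hypothetical placeholders):
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path ./unispeech_sat.pt \
#       --pytorch_dump_folder_path ./hf_unispeech_sat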
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 330 | 1 |
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        """
        Arguments:
            num_of_nodes - the number of nodes in the graph
        Attributes:
            m_num_of_nodes - the number of nodes in the graph
            m_edges - the list of edges
            m_component - a dict mapping each node to the index of its component
        """
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Adds an edge in the format [first, second, edge weight] to the graph."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Returns the root of the component that u_node belongs to."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Propagates the component roots throughout the component map."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Compares two components by size and attaches the smaller one to the
        larger one to form a single component."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Performs Borůvka's algorithm to find the MST."""
        # Initialize additional lists required by the algorithm.
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge

                u_component = self.m_component[u]
                v_component = self.m_component[v]

                if u_component != v_component:
                    # Record the edge if it is the cheapest one leaving either
                    # of the two components seen so far in this round.
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge

                    u_component = self.m_component[u]
                    v_component = self.m_component[v]

                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    pass
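# Usage sketch (hypothetical 3-node graph): each round merges every component
# along its cheapest outgoing edge, so only the weight-1 and weight-2 edges
# survive and the printed MST weight is 3.
#   g = Graph(3)
#   g.add_edge(0, 1, 1)
#   g.add_edge(1, 2, 2)
#   g.add_edge(0, 2, 4)
#   g.boruvka()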
if __name__ == "__main__":
import doctest
doctest.testmod()
| 216 |
def naive_pattern_search(s: str, pattern: str) -> list:
    # O(n * m) scan: try every alignment of the pattern against the text
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
| 216 | 1 |
"""simple docstring"""
import datasets
from .evaluate import evaluate
_CITATION = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""
_DESCRIPTION = """
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
_KWARGS_DESCRIPTION = """
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the SQuAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
>>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
>>> squad_metric = datasets.load_metric(\"squad\")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Squad(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 364 |
"""simple docstring"""
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model
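# Example invocation (paths are hypothetical placeholders):
#   python convert_xglm_checkpoint.py ./xglm_fairseq/model.pt ./hf_xglm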
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 168 | 0 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """
    Return a dict of current worldwide COVID-19 statistics scraped from
    worldometers.info.
    """
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
print(F"{key}\n{value}\n")
| 23 |
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    """
    Calculate the built-in voltage of a pn junction diode:
    V_bi = (kT / q) * ln(Nd * Na / ni**2)
    """
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            "Donor concentration should be greater than intrinsic concentration"
        )
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            "Acceptor concentration should be greater than intrinsic concentration"
        )
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )
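# Worked example (hypothetical silicon-like values): with
# donor_conc = acceptor_conc = 1e17 and intrinsic_conc = 1.5e10,
# V_bi = (kT / q) * ln(Nd * Na / ni**2) ≈ 0.02585 * ln(4.44e13) ≈ 0.81 V at 300 K.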
if __name__ == "__main__":
import doctest
doctest.testmod() | 163 | 0 |
'''simple docstring'''
import socket
def main() -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")
if __name__ == "__main__":
main()
| 107 |
"""
Project Euler Problem 551: https://projecteuler.net/problem=551

Define a(0) = 1 and, for n >= 1, a(n) = a(n-1) + digitsum(a(n-1)).
Find a(10**15). The solution below writes terms as b * 10^k + c and caches
"jumps" keyed on digitsum(b) and c so large spans of terms can be skipped.
"""

ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict = {}


def next_term(a_i, k, i, n):
    # ds_b = digitsum(b), c = the low-order part of the current term
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    # adds addend to the digit array starting at index k, carrying as needed
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(f"""{solution() = }""")
| 107 | 1 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        # scale pixel values from [0, 1] to [-1, 1]
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    # spherical linear interpolation; falls back to lerp when the vectors are
    # almost collinear (|cos| > DOT_THRESHOLD)
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
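# Note: for unit vectors, ||x - y|| = 2 * sin(theta / 2), so the chain above
# evaluates to theta**2 / 2 -- a squared geodesic (great-circle) distance
# between the two embeddings.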
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae,
        text_encoder,
        clip_model,
        tokenizer,
        unet,
        scheduler,
        feature_extractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
    def enable_attention_slicing(self, slice_size="auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")

    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
@torch.enable_grad()
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Optional[Any]:
lowerCamelCase_ = latents.detach().requires_grad_()
lowerCamelCase_ = self.scheduler.scale_model_input(lowercase , lowercase )
# predict the noise residual
lowerCamelCase_ = self.unet(lowercase , lowercase , encoder_hidden_states=lowercase ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
lowerCamelCase_ = self.scheduler.alphas_cumprod[timestep]
lowerCamelCase_ = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowerCamelCase_ = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
lowerCamelCase_ = torch.sqrt(lowercase )
lowerCamelCase_ = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , lowercase ):
lowerCamelCase_ = self.scheduler.sigmas[index]
lowerCamelCase_ = latents - sigma * noise_pred
else:
raise ValueError(f'scheduler type {type(self.scheduler )} not supported' )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
lowerCamelCase_ = 1 / 0.1_8_2_1_5 * sample
lowerCamelCase_ = self.vae.decode(lowercase ).sample
lowerCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
lowerCamelCase_ = transforms.Resize(self.feature_extractor_size )(lowercase )
lowerCamelCase_ = self.normalize(lowercase ).to(latents.dtype )
lowerCamelCase_ = self.clip_model.get_image_features(lowercase )
lowerCamelCase_ = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowercase )
lowerCamelCase_ = spherical_dist_loss(lowercase , lowercase ).mean() * clip_guidance_scale
lowerCamelCase_ = -torch.autograd.grad(lowercase , lowercase )[0]
if isinstance(self.scheduler , lowercase ):
lowerCamelCase_ = latents.detach() + grads * (sigma**2)
lowerCamelCase_ = noise_pred_original
else:
lowerCamelCase_ = noise_pred_original - torch.sqrt(lowercase ) * grads
return noise_pred, latents
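    # For reference, the "predicted x_0" computed in the DDIM/PNDM branch above is,
    # written out, x_0 = (x_t - sqrt(1 - alpha_bar_t) * eps) / sqrt(alpha_bar_t)
    # (formula (12) of https://arxiv.org/pdf/2010.02502.pdf); the guidance gradient is the
    # derivative, with respect to the latents, of the spherical distance between the CLIP
    # embedding of the decoded x_0 estimate and the target image embedding, scaled by
    # clip_guidance_scale.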
@torch.no_grad()
def __call__( self , lowercase , lowercase , lowercase = None , lowercase = None , lowercase = 512 , lowercase = 512 , lowercase = 0.6 , lowercase = 50 , lowercase = 7.5 , lowercase = 1 , lowercase = 0.0 , lowercase = 100 , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = 0.8 , lowercase = 0.1 , lowercase = 0.1 , ) -> int:
if isinstance(lowercase , lowercase ) and len(lowercase ) != batch_size:
raise ValueError(f'You have passed {batch_size} batch_size, but only {len(lowercase )} generators.' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if isinstance(lowercase , torch.Generator ) and batch_size > 1:
lowerCamelCase_ = [generator] + [None] * (batch_size - 1)
lowerCamelCase_ = [
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
lowerCamelCase_ = [x[0] for x in coca_is_none if x[1]]
lowerCamelCase_ = ", ".join(lowercase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(lowercase ):
raise ValueError(
f'Content prompt is None and CoCa [{coca_is_none_str}] is None.'
                f' Set prompt or pass CoCa [{coca_is_none_str}] to DiffusionPipeline.' )
lowerCamelCase_ = self.get_image_description(lowercase )
if style_prompt is None:
if len(lowercase ):
raise ValueError(
f'Style prompt is None and CoCa [{coca_is_none_str}] is None.'
                f' Set prompt or pass CoCa [{coca_is_none_str}] to DiffusionPipeline.' )
lowerCamelCase_ = self.get_image_description(lowercase )
# get prompt text embeddings for content and style
lowerCamelCase_ = self.tokenizer(
lowercase , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=lowercase , return_tensors="pt" , )
lowerCamelCase_ = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
lowerCamelCase_ = self.tokenizer(
lowercase , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=lowercase , return_tensors="pt" , )
lowerCamelCase_ = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
lowerCamelCase_ = slerp(lowercase , lowercase , lowercase )
# duplicate text embeddings for each generation per prompt
lowerCamelCase_ = text_embeddings.repeat_interleave(lowercase , dim=0 )
# set timesteps
lowerCamelCase_ = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
lowerCamelCase_ = {}
if accepts_offset:
lowerCamelCase_ = 1
self.scheduler.set_timesteps(lowercase , **lowercase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
lowerCamelCase_ , lowerCamelCase_ = self.get_timesteps(lowercase , lowercase , self.device )
lowerCamelCase_ = timesteps[:1].repeat(lowercase )
# Preprocess image
lowerCamelCase_ = preprocess(lowercase , lowercase , lowercase )
lowerCamelCase_ = self.prepare_latents(
lowercase , lowercase , lowercase , text_embeddings.dtype , self.device , lowercase )
lowerCamelCase_ = preprocess(lowercase , lowercase , lowercase )
lowerCamelCase_ = self.prepare_latents(
lowercase , lowercase , lowercase , text_embeddings.dtype , self.device , lowercase )
lowerCamelCase_ = slerp(lowercase , lowercase , lowercase )
if clip_guidance_scale > 0:
lowerCamelCase_ = self.get_clip_image_embeddings(lowercase , lowercase )
lowerCamelCase_ = self.get_clip_image_embeddings(lowercase , lowercase )
lowerCamelCase_ = slerp(
lowercase , lowercase , lowercase )
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCamelCase_ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase_ = content_text_input.input_ids.shape[-1]
lowerCamelCase_ = self.tokenizer([""] , padding="max_length" , max_length=lowercase , return_tensors="pt" )
lowerCamelCase_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
lowerCamelCase_ = uncond_embeddings.repeat_interleave(lowercase , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase_ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCamelCase_ = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
lowerCamelCase_ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
lowerCamelCase_ = torch.randn(lowercase , generator=lowercase , device="cpu" , dtype=lowercase ).to(
self.device )
else:
lowerCamelCase_ = torch.randn(lowercase , generator=lowercase , device=self.device , dtype=lowercase )
else:
if latents.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
lowerCamelCase_ = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCamelCase_ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase_ = {}
if accepts_eta:
lowerCamelCase_ = eta
# check if the scheduler accepts generator
lowerCamelCase_ = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
lowerCamelCase_ = generator
with self.progress_bar(total=lowercase ):
for i, t in enumerate(lowercase ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ = self.scheduler.scale_model_input(lowercase , lowercase )
# predict the noise residual
lowerCamelCase_ = self.unet(lowercase , lowercase , encoder_hidden_states=lowercase ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase_ , lowerCamelCase_ = noise_pred.chunk(2 )
lowerCamelCase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
lowerCamelCase_ = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
lowerCamelCase_ , lowerCamelCase_ = self.cond_fn(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , )
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase_ = self.scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
lowerCamelCase_ = 1 / 0.1_8_2_1_5 * latents
lowerCamelCase_ = self.vae.decode(lowercase ).sample
lowerCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCamelCase_ = self.numpy_to_pil(lowercase )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=lowercase , nsfw_content_detected=lowercase )
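# Usage sketch (illustrative; not part of the original file). The positional image
# arguments are assumptions; `content_prompt`, `style_prompt`, `clip_guidance_scale`,
# `guidance_scale` and `output_type` all appear in the `__call__` body above.
def _example_images_mixing(pipe, content_image, style_image):
    # `pipe` is an instance of the pipeline defined above; the images are PIL images.
    output = pipe(
        content_image,
        style_image,
        content_prompt=None,  # None -> the content image is captioned with the CoCa model above
        style_prompt=None,
        clip_guidance_scale=100,
        guidance_scale=7.5,
        output_type="pil",
    )
    return output.images[0]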
| 19 |
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
lowercase_ = logging.getLogger(__name__)
lowercase_ = """Hello world! cécé herlolip"""
lowercase_ = namedtuple(
"""BertAbsConfig""",
[
"""temp_dir""",
"""large""",
"""use_bert_emb""",
"""finetune_bert""",
"""encoder""",
"""share_emb""",
"""max_pos""",
"""enc_layers""",
"""enc_hidden_size""",
"""enc_heads""",
"""enc_ff_size""",
"""enc_dropout""",
"""dec_layers""",
"""dec_hidden_size""",
"""dec_heads""",
"""dec_ff_size""",
"""dec_dropout""",
],
)
def convert_bertabs_checkpoints( path_to_checkpoints : str , pytorch_dump_folder_path : str ) ->None:
    config = BertAbsConfig(
        temp_dir=""".""" , finetune_bert=False , large=False , share_emb=True , use_bert_emb=False , encoder="""bert""" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
    checkpoints = torch.load(path_to_checkpoints , lambda storage , loc : storage )
    original = AbsSummarizer(config , torch.device("""cpu""" ) , checkpoints )
    original.eval()
    new_model = BertAbsSummarizer(config , torch.device("""cpu""" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("""convert the model""" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("""Make sure that the models' outputs are identical""" )
    tokenizer = BertTokenizer.from_pretrained("""bert-base-uncased""" )
# prepare the model inputs
    encoder_input_ids = tokenizer.encode("""This is sample éàalj'-.""" )
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids )) )
    encoder_input_ids = torch.tensor(encoder_input_ids ).unsqueeze(0 )
    decoder_input_ids = tokenizer.encode("""This is sample 3 éàalj'-.""" )
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids )) )
    decoder_input_ids = torch.tensor(decoder_input_ids ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
    output_original_model = original(src , tgt , segs , clss , mask_src , mask_tgt , mask_cls )[0]
    output_original_generator = original.generator(output_original_model )
    output_converted_model = new_model(
        encoder_input_ids , decoder_input_ids , token_type_ids , encoder_attention_mask , decoder_attention_mask )[0]
    output_converted_generator = new_model.generator(output_converted_model )
    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print("""Maximum absolute difference between model outputs: {:.2f}""".format(maximum_absolute_difference ) )
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print("""Maximum absolute difference between generator outputs: {:.2f}""".format(maximum_absolute_difference ) )
    are_identical = torch.allclose(output_converted_generator , output_original_generator , atol=1e-3 )
if are_identical:
logging.info("""all weights are equal up to 1e-3""" )
else:
raise ValueError("""the weights are different. The new model is likely different from the original one.""" )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("""saving the model's state dictionary""" )
torch.save(
new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
"""--bertabs_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
lowercase_ = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
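    # Illustrative invocation (the script filename is an assumption):
    #   python convert_bertabs_original_pytorch_checkpoint.py \
    #     --bertabs_checkpoint_path /path/to/bertabs_checkpoint.pt \
    #     --pytorch_dump_folder_path ./converted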
| 58 | 0 |
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO) , """Tatoeba directory does not exist.""")
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
@cached_property
def lowerCAmelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=lowerCAmelCase__ )
@slow
def lowerCAmelCase__ (self ) -> Optional[int]:
"""simple docstring"""
self.resolver.convert_models(['''heb-eng'''] )
@slow
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
        lowerCAmelCase__ , mmeta = self.resolver.write_model_card('''opus-mt-he-en''' ,dry_run=True )
assert mmeta["long_pair"] == "heb-eng"
| 94 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Any =logging.get_logger(__name__)
__snake_case : Tuple ={
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class lowerCamelCase__ ( PretrainedConfig):
'''simple docstring'''
snake_case_ ="""vit_msn"""
    def __init__(self ,hidden_size=7_68 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=30_72 ,hidden_act="gelu" ,hidden_dropout_prob=0.0 ,attention_probs_dropout_prob=0.0 ,initializer_range=0.02 ,layer_norm_eps=1e-06 ,image_size=2_24 ,patch_size=16 ,num_channels=3 ,qkv_bias=True ,**kwargs ,) -> Any:
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
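# Usage sketch (illustrative; not part of the original file): the class above mirrors
# `transformers.ViTMSNConfig`, so it can be instantiated directly with overrides.
_example_config = lowerCamelCase__(hidden_size=384, num_attention_heads=6)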
| 94 | 1 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)
STOPPING_CRITERIA_INPUTS_DOCSTRING = R'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class StoppingCriteria(ABC):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids : torch.LongTensor , scores : torch.FloatTensor , **kwargs ) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed" )
class MaxLengthCriteria(StoppingCriteria):
    def __init__( self , max_length : int , max_position_embeddings : Optional[int] = None ) -> None:
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids : torch.LongTensor , scores : torch.FloatTensor , **kwargs ) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                F'''maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '''
                "exceptions, performance degradation, or nothing at all." )
        return is_done
class MaxNewTokensCriteria(StoppingCriteria):
    def __init__( self , start_length : int , max_new_tokens : int ) -> None:
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            F'''Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` '''
            "with `max_length = start_length + max_new_tokens` instead." , FutureWarning , )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids : torch.LongTensor , scores : torch.FloatTensor , **kwargs ) -> bool:
        return input_ids.shape[-1] >= self.max_length
class MaxTimeCriteria(StoppingCriteria):
    def __init__( self , max_time : float , initial_timestamp : Optional[float] = None ) -> None:
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids : torch.LongTensor , scores : torch.FloatTensor , **kwargs ) -> bool:
        return time.time() - self.initial_timestamp > self.max_time
class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids : torch.LongTensor , scores : torch.FloatTensor , **kwargs ) -> bool:
        return any(criteria(input_ids , scores ) for criteria in self )
    @property
    def max_length( self ) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium , MaxLengthCriteria ):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium , MaxNewTokensCriteria ):
                return stopping_criterium.max_length
        return None
def validate_stopping_criteria(stopping_criteria : StoppingCriteriaList , max_length : int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
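# Usage sketch (illustrative; not part of the original file): `generate()` builds this
# list internally, but the criteria also compose standalone.
def _example_stopping_criteria():
    criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)])
    input_ids = torch.zeros((1, 21), dtype=torch.long)
    scores = torch.zeros((1, 100))
    return criteria(input_ids, scores)  # True, since 21 >= 20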
| 87 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset( dataset , expected_features ):
    assert isinstance(dataset , Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True])
def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Any , _lowerCamelCase : str):
lowercase__ : Optional[int] = tmp_path / "cache"
lowercase__ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowercase__ : Union[str, Any] = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase)
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Dict , _lowerCamelCase : Dict):
lowercase__ : List[Any] = tmp_path / "cache"
lowercase__ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowercase__ : List[Any] = features.copy() if features else default_expected_features
lowercase__ : List[Any] = (
Features({feature: Value(_lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None
)
lowercase__ : Any = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase)
@pytest.mark.parametrize(
"features" , [
None,
{"col_3": "float64", "col_1": "string", "col_2": "int64"},
] , )
def lowercase_ ( _lowerCamelCase : Any , _lowerCamelCase : Any , _lowerCamelCase : List[str]):
lowercase__ : Optional[Any] = tmp_path / "cache"
lowercase__ : Tuple = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
lowercase__ : List[Any] = features.copy() if features else default_expected_features
lowercase__ : int = (
Features({feature: Value(_lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None
)
lowercase__ : Any = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase).read()
assert isinstance(_lowerCamelCase , _lowerCamelCase)
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int]):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
lowercase__ : Any = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
lowercase__ : str = features.copy()
lowercase__ : str = (
Features({feature: Value(_lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None
)
lowercase__ : Optional[int] = tmp_path / "cache"
lowercase__ : Any = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase).read()
assert isinstance(_lowerCamelCase , _lowerCamelCase)
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split" , [None, NamedSplit("train"), "train", "test"])
def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[str]):
lowercase__ : Union[str, Any] = tmp_path / "cache"
lowercase__ : List[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowercase__ : Union[str, Any] = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , split=_lowerCamelCase).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase)
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list])
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : int):
if issubclass(_lowerCamelCase , _lowerCamelCase):
lowercase__ : Tuple = jsonl_path
elif issubclass(_lowerCamelCase , _lowerCamelCase):
lowercase__ : str = [jsonl_path]
lowercase__ : str = tmp_path / "cache"
lowercase__ : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowercase__ : Tuple = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase)
def _check_json_datasetdict( dataset_dict , expected_features , splits=("train",)):
    assert isinstance(dataset_dict , DatasetDict)
for split in splits:
lowercase__ : Optional[Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True])
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : str):
lowercase__ : List[str] = tmp_path / "cache"
lowercase__ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowercase__ : Optional[Any] = JsonDatasetReader({"train": jsonl_path} , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase).read()
_check_json_datasetdict(_lowerCamelCase , _lowerCamelCase)
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowercase_ ( _lowerCamelCase : Any , _lowerCamelCase : List[str] , _lowerCamelCase : List[str]):
lowercase__ : str = tmp_path / "cache"
lowercase__ : Tuple = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowercase__ : Tuple = features.copy() if features else default_expected_features
lowercase__ : Union[str, Any] = (
Features({feature: Value(_lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None
)
lowercase__ : Tuple = JsonDatasetReader({"train": jsonl_path} , features=_lowerCamelCase , cache_dir=_lowerCamelCase).read()
_check_json_datasetdict(_lowerCamelCase , _lowerCamelCase)
@pytest.mark.parametrize("split" , [None, NamedSplit("train"), "train", "test"])
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : Dict , _lowerCamelCase : Tuple):
if split:
lowercase__ : Tuple = {split: jsonl_path}
else:
lowercase__ : Tuple = "train"
lowercase__ : int = {"train": jsonl_path, "test": jsonl_path}
lowercase__ : Dict = tmp_path / "cache"
lowercase__ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowercase__ : Union[str, Any] = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase).read()
_check_json_datasetdict(_lowerCamelCase , _lowerCamelCase , splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
def load_json( buffer ):
    return json.load(buffer)
def load_json_lines( buffer ):
    return [json.loads(line) for line in buffer]
class snake_case_ :
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def __UpperCamelCase ( self : List[Any] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Dict ) -> Optional[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ ).write()
buffer.seek(0 )
lowercase__ : Optional[int] = load_json_function(lowercase_ )
assert isinstance(lowercase_ , lowercase_ )
assert isinstance(exported_content[0] , lowercase_ )
assert len(lowercase_ ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def __UpperCamelCase ( self : str , lowercase_ : int , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Tuple ) -> List[str]:
with io.BytesIO() as buffer:
JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , orient=lowercase_ ).write()
buffer.seek(0 )
lowercase__ : str = load_json(lowercase_ )
assert isinstance(lowercase_ , lowercase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowercase_ , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(lowercase_ ) == 10
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def __UpperCamelCase ( self : List[Any] , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : Dict ) -> Optional[int]:
with io.BytesIO() as buffer:
JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , num_proc=2 ).write()
buffer.seek(0 )
lowercase__ : str = load_json_function(lowercase_ )
assert isinstance(lowercase_ , lowercase_ )
assert isinstance(exported_content[0] , lowercase_ )
assert len(lowercase_ ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Dict ) -> Any:
with io.BytesIO() as buffer:
JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , orient=lowercase_ , num_proc=2 ).write()
buffer.seek(0 )
lowercase__ : Optional[Any] = load_json(lowercase_ )
assert isinstance(lowercase_ , lowercase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowercase_ , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(lowercase_ ) == 10
def __UpperCamelCase ( self : Dict , lowercase_ : List[str] ) -> str:
with pytest.raises(lowercase_ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowercase_ , lowercase_ , num_proc=0 )
@pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] )
def __UpperCamelCase ( self : List[Any] , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : List[Any] ) -> Any:
lowercase__ : Dict = tmp_path_factory.mktemp("data" ) / F'''test.json.{extension}'''
lowercase__ : Optional[int] = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(lowercase_ , lowercase_ , compression=lowercase_ ).write()
with fsspec.open(lowercase_ , "rb" , compression="infer" ) as f:
lowercase__ : List[Any] = f.read()
with fsspec.open(lowercase_ , "rb" , compression="infer" ) as f:
lowercase__ : str = f.read()
assert exported_content == original_content
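# Usage sketch (illustrative; not part of the original file): round-tripping a small
# Dataset through JSON lines with the reader/writer exercised above.
def _example_json_roundtrip(tmp_path):
    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
    out = str(tmp_path / "data.jsonl")
    JsonDatasetWriter(ds, out, lines=True).write()
    return JsonDatasetReader(out).read()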
| 87 | 1 |
"""simple docstring"""
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job( job ):
    """simple docstring"""
    job_info = {}
    start = job['''started_at''']
    end = job['''completed_at''']
    start_datetime = date_parser.parse(start )
    end_datetime = date_parser.parse(end )
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0 )
    job_info['''started_at'''] = start
    job_info['''completed_at'''] = end
    job_info['''duration'''] = duration_in_min
    return job_info
def get_job_time( workflow_run_id , token=None ):
    """simple docstring"""
    headers = None
    if token is not None:
        headers = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f'''Bearer {token}'''}
    url = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
    result = requests.get(url , headers=headers ).json()
    job_time = {}
    try:
        job_time.update({job['''name''']: extract_time_from_single_job(job ) for job in result['''jobs''']} )
        pages_to_iterate_over = math.ceil((result['''total_count'''] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + f'''&page={i + 2}''' , headers=headers ).json()
            job_time.update({job['''name''']: extract_time_from_single_job(job ) for job in result['''jobs''']} )
        return job_time
    except Exception:
        print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
    return {}
if __name__ == "__main__":
_lowercase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
_lowercase : int = parser.parse_args()
_lowercase : Optional[Any] = get_job_time(args.workflow_run_id)
_lowercase : Dict = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f'{k}: {v["duration"]}')
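# Illustrative invocation (script name and run id are placeholders):
#   python get_job_time.py --workflow_run_id 2945609517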
| 272 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
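# With this pattern, `from transformers import TrOCRProcessor` stays cheap: `_LazyModule`
# defers importing the torch-backed modeling file until the attribute is first accessed.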
| 272 | 1 |
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object
    def Body( *args , **kwargs ) -> None:
        '''simple docstring'''
        pass
    _serve_dependencies_installed = False
logger = logging.get_logger('transformers-cli/serving')
def serve_command_factory( args : Namespace) -> Any:
    '''simple docstring'''
    nlp = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    return ServeCommand(nlp , args.host , args.port , args.workers)
class ServeModelInfoResult(BaseModel):
    '''simple docstring'''
    infos: dict
class ServeTokenizeResult(BaseModel):
    '''simple docstring'''
    tokens: List[str]
    tokens_ids: Optional[List[int]]
class ServeDeTokenizeResult(BaseModel):
    '''simple docstring'''
    text: str
class ServeForwardResult(BaseModel):
    '''simple docstring'''
    output: Any
class ServeCommand(BaseTransformersCLICommand):
'''simple docstring'''
@staticmethod
    def register_subcommand( a :ArgumentParser ) -> None:
        serve_parser = a.add_parser(
            "serve" , help="CLI tool to run inference requests through REST and GraphQL endpoints." )
        serve_parser.add_argument(
            "--task" , type=str , choices=get_supported_tasks() , help="The task to run the pipeline on" , )
        serve_parser.add_argument("--host" , type=str , default="localhost" , help="Interface the server will listen on." )
        serve_parser.add_argument("--port" , type=int , default=8_8_8_8 , help="Port the serving will listen to." )
        serve_parser.add_argument("--workers" , type=int , default=1 , help="Number of http workers" )
        serve_parser.add_argument("--model" , type=str , help="Model's name or path to stored model." )
        serve_parser.add_argument("--config" , type=str , help="Model's config name or path to stored model." )
        serve_parser.add_argument("--tokenizer" , type=str , help="Tokenizer name to use." )
        serve_parser.add_argument(
            "--device" , type=int , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , )
        serve_parser.set_defaults(func=serve_command_factory )
    def __init__( self , pipeline :Pipeline , host :str , port :int , workers :int ) -> None:
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers
if not _serve_dependencies_installed:
raise RuntimeError(
"Using serve command requires FastAPI and uvicorn. "
"Please install transformers with [serving]: pip install \"transformers[serving]\"."
"Or install FastAPI and uvicorn separately." )
else:
logger.info(f'Serving model over {host}:{port}' )
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/" , self.model_info , response_model=ServeModelInfoResult , response_class=JSONResponse , methods=["GET"] , ),
                    APIRoute(
                        "/tokenize" , self.tokenize , response_model=ServeTokenizeResult , response_class=JSONResponse , methods=["POST"] , ),
                    APIRoute(
                        "/detokenize" , self.detokenize , response_model=ServeDeTokenizeResult , response_class=JSONResponse , methods=["POST"] , ),
                    APIRoute(
                        "/forward" , self.forward , response_model=ServeForwardResult , response_class=JSONResponse , methods=["POST"] , ),
                ] , timeout=6_0_0 , )
    def run( self ) -> None:
        run(self._app , host=self.host , port=self.port , workers=self.workers )
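    # Illustrative launch (the task name is an example):
    #   transformers-cli serve --task feature-extraction --host localhost --port 8888
    # after which `GET /` returns the model config via `model_info` below.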
    def model_info( self ) -> ServeModelInfoResult:
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
    def tokenize( self , text_input : str = Body(None , embed=True ) , return_ids : bool = Body(False , embed=True ) ) -> ServeTokenizeResult:
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input )
            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt )
                return ServeTokenizeResult(tokens=tokens_txt , tokens_ids=tokens_ids )
            else:
                return ServeTokenizeResult(tokens=tokens_txt )
        except Exception as e:
            raise HTTPException(status_code=5_0_0 , detail={"model": "", "error": str(e )} )
    def detokenize( self , tokens_ids : List[int] = Body(None , embed=True ) , skip_special_tokens : bool = Body(False , embed=True ) , cleanup_tokenization_spaces : bool = Body(True , embed=True ) , ) -> str:
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids , skip_special_tokens , cleanup_tokenization_spaces )
            return ServeDeTokenizeResult(model="" , text=decoded_str )
        except Exception as e:
            raise HTTPException(status_code=5_0_0 , detail={"model": "", "error": str(e )} )
    async def forward( self , inputs=Body(None , embed=True ) ) -> ServeForwardResult:
        # Check we don't have empty string
        if len(inputs ) == 0:
            return ServeForwardResult(output=[] , attention=[] )
        try:
            # Forward through the model
            output = self._pipeline(inputs )
            return ServeForwardResult(output=output )
        except Exception as e:
            raise HTTPException(5_0_0 , {"error": str(e )} )
| 232 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 232 | 1
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
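# `enable_full_determinism()` seeds the RNGs and forces deterministic torch/CUDA ops so the
# pixel-level slices asserted below are reproducible across runs.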
class lowercase ( PipelineTesterMixin , unittest.TestCase):
"""simple docstring"""
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
    required_optional_params = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
    test_xformers_attention = False
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
return 32
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
return 32
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
return self.time_input_dim
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
return self.time_input_dim * 4
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
return 100
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
UpperCAmelCase_= XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
torch.manual_seed(0 )
UpperCAmelCase_= MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , )
UpperCAmelCase_= MultilingualCLIP(__UpperCAmelCase )
UpperCAmelCase_= text_encoder.eval()
return text_encoder
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
torch.manual_seed(0 )
UpperCAmelCase_= {
"""in_channels""": 4,
            # Out channels is double the in channels because the model predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        UpperCAmelCase_= UNet2DConditionModel(**__UpperCAmelCase )
return model
@property
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
torch.manual_seed(0 )
UpperCAmelCase_= VQModel(**self.dummy_movq_kwargs )
return model
def _SCREAMING_SNAKE_CASE ( self : str ) -> Any:
UpperCAmelCase_= self.dummy_text_encoder
UpperCAmelCase_= self.dummy_tokenizer
UpperCAmelCase_= self.dummy_unet
UpperCAmelCase_= self.dummy_movq
UpperCAmelCase_= {
"""num_train_timesteps""": 1_000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00_085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
UpperCAmelCase_= DDIMScheduler(**__UpperCAmelCase )
UpperCAmelCase_= {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _SCREAMING_SNAKE_CASE ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any]=0 ) -> Union[str, Any]:
UpperCAmelCase_= floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
UpperCAmelCase_= floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__UpperCAmelCase )
# create init_image
UpperCAmelCase_= floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
UpperCAmelCase_= image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase_= Image.fromarray(np.uint8(__UpperCAmelCase ) ).convert("""RGB""" ).resize((256, 256) )
if str(__UpperCAmelCase ).startswith("""mps""" ):
UpperCAmelCase_= torch.manual_seed(__UpperCAmelCase )
else:
UpperCAmelCase_= torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
UpperCAmelCase_= {
"""prompt""": """horse""",
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
UpperCAmelCase_= """cpu"""
UpperCAmelCase_= self.get_dummy_components()
UpperCAmelCase_= self.pipeline_class(**__UpperCAmelCase )
UpperCAmelCase_= pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCAmelCase_= pipe(**self.get_dummy_inputs(__UpperCAmelCase ) )
UpperCAmelCase_= output.images
UpperCAmelCase_= pipe(
**self.get_dummy_inputs(__UpperCAmelCase ) , return_dict=__UpperCAmelCase , )[0]
UpperCAmelCase_= image[0, -3:, -3:, -1]
UpperCAmelCase_= image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_= np.array(
[0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
UpperCAmelCase_= load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_img2img_frog.npy""" )
UpperCAmelCase_= load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
UpperCAmelCase_= """A red cartoon frog, 4k"""
UpperCAmelCase_= KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__UpperCAmelCase )
        UpperCAmelCase_= KandinskyImg2ImgPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.float16 )
UpperCAmelCase_= pipeline.to(__UpperCAmelCase )
pipeline.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCAmelCase_= torch.Generator(device="""cpu""" ).manual_seed(0 )
UpperCAmelCase_, UpperCAmelCase_= pipe_prior(
__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
UpperCAmelCase_= pipeline(
__UpperCAmelCase , image=__UpperCAmelCase , image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
UpperCAmelCase_= output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
| 360 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__A = 16
__A = 32
def get_dataloaders( accelerator : Accelerator ,batch_size : int = 16 ,model_name_or_path : str = "bert-base-cased" ) -> Tuple:
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path )
    datasets = load_dataset("""glue""" ,"""mrpc""" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=True ,max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function ,batched=True ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" ,"""labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples ,padding="""max_length""" ,max_length=1_28 ,return_tensors="""pt""" )
        return tokenizer.pad(examples ,padding="""longest""" ,return_tensors="""pt""" )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] ,shuffle=True ,collate_fn=collate_fn ,batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] ,shuffle=False ,collate_fn=collate_fn ,batch_size=batch_size )
    return train_dataloader, eval_dataloader
def training_function( config ,args ) -> int:
    '''simple docstring'''
UpperCAmelCase_= Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase_= config["""lr"""]
UpperCAmelCase_= int(config["""num_epochs"""] )
UpperCAmelCase_= int(config["""seed"""] )
UpperCAmelCase_= int(config["""batch_size"""] )
UpperCAmelCase_= args.model_name_or_path
set_seed(lowerCAmelCase_ )
UpperCAmelCase_, UpperCAmelCase_= get_dataloaders(lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ )
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
UpperCAmelCase_= AutoModelForSequenceClassification.from_pretrained(lowerCAmelCase_ ,return_dict=lowerCAmelCase_ )
# Instantiate optimizer
UpperCAmelCase_= (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
UpperCAmelCase_= optimizer_cls(params=model.parameters() ,lr=lowerCAmelCase_ )
if accelerator.state.deepspeed_plugin is not None:
UpperCAmelCase_= accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
UpperCAmelCase_= 1
UpperCAmelCase_= (len(lowerCAmelCase_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCAmelCase_= get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase_ ,num_warmup_steps=0 ,num_training_steps=lowerCAmelCase_ ,)
else:
UpperCAmelCase_= DummyScheduler(lowerCAmelCase_ ,total_num_steps=lowerCAmelCase_ ,warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= accelerator.prepare(
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ )
# We need to keep track of how many total steps we have iterated over
UpperCAmelCase_= 0
    # We also need to keep track of the starting epoch so files are named properly
UpperCAmelCase_= 0
# Now we train the model
UpperCAmelCase_= evaluate.load("""glue""" ,"""mrpc""" )
UpperCAmelCase_= 0
UpperCAmelCase_= {}
for epoch in range(lowerCAmelCase_ ,lowerCAmelCase_ ):
model.train()
for step, batch in enumerate(lowerCAmelCase_ ):
UpperCAmelCase_= model(**lowerCAmelCase_ )
UpperCAmelCase_= outputs.loss
UpperCAmelCase_= loss / gradient_accumulation_steps
accelerator.backward(lowerCAmelCase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
UpperCAmelCase_= 0
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase_= model(**lowerCAmelCase_ )
UpperCAmelCase_= outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
UpperCAmelCase_, UpperCAmelCase_= accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowerCAmelCase_ ) - 1:
UpperCAmelCase_= predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCAmelCase_= references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowerCAmelCase_ ,references=lowerCAmelCase_ ,)
UpperCAmelCase_= metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" ,lowerCAmelCase_ )
UpperCAmelCase_= eval_metric["""accuracy"""]
if best_performance < eval_metric["accuracy"]:
UpperCAmelCase_= eval_metric["""accuracy"""]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir ,"""all_results.json""" ) ,"""w""" ) as f:
json.dump(lowerCAmelCase_ ,lowerCAmelCase_ )
def main( ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_= argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" ,type=lowerCAmelCase_ ,default="""bert-base-cased""" ,help="""Path to pretrained model or model identifier from huggingface.co/models.""" ,required=lowerCAmelCase_ ,)
parser.add_argument(
"""--output_dir""" ,type=lowerCAmelCase_ ,default=""".""" ,help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" ,)
parser.add_argument(
"""--performance_lower_bound""" ,type=lowerCAmelCase_ ,default=lowerCAmelCase_ ,help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""" ,)
parser.add_argument(
"""--num_epochs""" ,type=lowerCAmelCase_ ,default=3 ,help="""Number of train epochs.""" ,)
UpperCAmelCase_= parser.parse_args()
UpperCAmelCase_= {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(lowerCAmelCase_ ,lowerCAmelCase_ )
if __name__ == "__main__":
main()
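# --- Illustrative sketch (not part of the dataset row above) ---
# The training loop above scales the loss by gradient_accumulation_steps and only
# steps the optimizer every few micro-batches. A minimal, framework-free view of
# that pattern; all names below (toy_model, toy_batches) are hypothetical:
import torch

def accumulate_and_step(toy_model, optimizer, toy_batches, accumulation_steps=4):
    toy_model.train()
    for step, (inputs, targets) in enumerate(toy_batches):
        loss = torch.nn.functional.mse_loss(toy_model(inputs), targets)
        (loss / accumulation_steps).backward()  # scale so summed grads match one big batch
        if (step + 1) % accumulation_steps == 0:
            optimizer.step()
            optimizer.zero_grad()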
| 277 | 0 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
__lowerCamelCase = """0.12""" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape, filled with ids from [0, vocab_size)."""
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))
    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output
def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
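# --- Illustrative usage of the two helpers above (a sketch) ---
# ids_tensor draws random token ids; random_attention_mask forces the last
# position of every row to 1 so no row is completely masked out.
example_ids = ids_tensor((2, 4), vocab_size=99)
example_mask = random_attention_mask((2, 4))
assert example_ids.shape == (2, 4)
assert bool((example_mask[:, -1] == 1).all())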
@require_flax
class UpperCAmelCase :
A__ : Dict = None
A__ : Optional[int] = ()
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Any:
'''simple docstring'''
snake_case , snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
snake_case : str = 2
snake_case : int = inputs["input_ids"].shape[-1] // 2
snake_case : Union[str, Any] = inputs["input_ids"][:max_batch_size, :sequence_length]
snake_case : Tuple = jnp.ones_like(snake_case__ )
snake_case : str = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
snake_case : Any = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
snake_case : Union[str, Any] = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Dict:
'''simple docstring'''
snake_case , snake_case , snake_case , snake_case : Tuple = self._get_input_ids_and_config()
snake_case : Union[str, Any] = False
snake_case : Union[str, Any] = max_length
snake_case : List[Any] = 0
for model_class in self.all_generative_model_classes:
snake_case : List[Any] = model_class(snake_case__ )
snake_case : Optional[Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning
snake_case : List[str] = getattr(snake_case__ , snake_case__ )
snake_case : Optional[int] = pt_model_class(snake_case__ ).eval()
snake_case : Tuple = load_flax_weights_in_pytorch_model(snake_case__ , flax_model.params )
snake_case : str = flax_model.generate(snake_case__ ).sequences
snake_case : str = pt_model.generate(torch.tensor(snake_case__ , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
snake_case : Tuple = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def _SCREAMING_SNAKE_CASE (self : str ) -> Tuple:
'''simple docstring'''
snake_case , snake_case , snake_case , snake_case : str = self._get_input_ids_and_config()
snake_case : Union[str, Any] = False
snake_case : List[str] = max_length
for model_class in self.all_generative_model_classes:
snake_case : int = model_class(snake_case__ )
snake_case : Dict = model.generate(snake_case__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
snake_case : str = jit(model.generate )
snake_case : Optional[int] = jit_generate(snake_case__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Any:
'''simple docstring'''
snake_case , snake_case , snake_case , snake_case : List[Any] = self._get_input_ids_and_config()
snake_case : Optional[Any] = True
snake_case : int = max_length
for model_class in self.all_generative_model_classes:
snake_case : List[Any] = model_class(snake_case__ )
snake_case : List[str] = model.generate(snake_case__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
snake_case : Optional[int] = jit(model.generate )
snake_case : int = jit_generate(snake_case__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> str:
'''simple docstring'''
snake_case , snake_case , snake_case , snake_case : int = self._get_input_ids_and_config()
snake_case : List[str] = False
snake_case : Optional[Any] = max_length
snake_case : List[Any] = 2
for model_class in self.all_generative_model_classes:
snake_case : int = model_class(snake_case__ )
snake_case : Any = model.generate(snake_case__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
snake_case : int = jit(model.generate )
snake_case : Dict = jit_generate(snake_case__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case , snake_case , snake_case , snake_case : List[Any] = self._get_input_ids_and_config()
snake_case : str = False
snake_case : Optional[int] = max_length
snake_case : Union[str, Any] = 2
snake_case : Optional[int] = 2
for model_class in self.all_generative_model_classes:
snake_case : str = model_class(snake_case__ )
snake_case : Dict = model.generate(snake_case__ ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> str:
'''simple docstring'''
snake_case , snake_case , snake_case , snake_case : Any = self._get_input_ids_and_config()
snake_case : int = True
snake_case : Dict = max_length
snake_case : Optional[int] = 0.8
snake_case : Dict = 10
snake_case : Optional[int] = 0.3
snake_case : Tuple = 1
snake_case : Optional[Any] = 8
snake_case : List[Any] = 9
for model_class in self.all_generative_model_classes:
snake_case : Optional[int] = model_class(snake_case__ )
snake_case : Union[str, Any] = model.generate(snake_case__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
snake_case : Optional[int] = jit(model.generate )
snake_case : Any = jit_generate(snake_case__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case , snake_case , snake_case , snake_case : List[str] = self._get_input_ids_and_config()
snake_case : int = max_length
snake_case : int = 1
snake_case : Optional[int] = 8
snake_case : Any = 9
for model_class in self.all_generative_model_classes:
snake_case : Optional[int] = model_class(snake_case__ )
snake_case : int = model.generate(snake_case__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
snake_case : List[Any] = jit(model.generate )
snake_case : Union[str, Any] = jit_generate(snake_case__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> str:
'''simple docstring'''
snake_case , snake_case , snake_case , snake_case : List[Any] = self._get_input_ids_and_config()
snake_case : List[Any] = max_length
snake_case : Dict = 2
snake_case : Any = 1
snake_case : str = 8
snake_case : Union[str, Any] = 9
for model_class in self.all_generative_model_classes:
snake_case : Union[str, Any] = model_class(snake_case__ )
snake_case : Union[str, Any] = model.generate(snake_case__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
snake_case : Optional[int] = jit(model.generate )
snake_case : Optional[Any] = jit_generate(snake_case__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> List[Any]:
'''simple docstring'''
snake_case , snake_case , snake_case , snake_case : Tuple = self._get_input_ids_and_config()
# pad attention mask on the left
snake_case : List[Any] = attention_mask.at[(0, 0)].set(0 )
snake_case : Tuple = False
snake_case : Tuple = max_length
for model_class in self.all_generative_model_classes:
snake_case : Optional[int] = model_class(snake_case__ )
snake_case : str = model.generate(snake_case__ , attention_mask=snake_case__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
snake_case : List[str] = jit(model.generate )
snake_case : Dict = jit_generate(snake_case__ , attention_mask=snake_case__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _SCREAMING_SNAKE_CASE (self : str ) -> Tuple:
'''simple docstring'''
snake_case , snake_case , snake_case , snake_case : Any = self._get_input_ids_and_config()
# pad attention mask on the left
snake_case : List[str] = attention_mask.at[(0, 0)].set(0 )
snake_case : Optional[int] = True
snake_case : Any = max_length
for model_class in self.all_generative_model_classes:
snake_case : str = model_class(snake_case__ )
snake_case : Optional[Any] = model.generate(snake_case__ , attention_mask=snake_case__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
snake_case : Optional[Any] = jit(model.generate )
snake_case : List[str] = jit_generate(snake_case__ , attention_mask=snake_case__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _SCREAMING_SNAKE_CASE (self : str ) -> int:
'''simple docstring'''
snake_case , snake_case , snake_case , snake_case : Optional[int] = self._get_input_ids_and_config()
# pad attention mask on the left
snake_case : Optional[int] = attention_mask.at[(0, 0)].set(0 )
snake_case : Optional[Any] = 2
snake_case : Optional[Any] = max_length
for model_class in self.all_generative_model_classes:
snake_case : Union[str, Any] = model_class(snake_case__ )
snake_case : Optional[Any] = model.generate(snake_case__ , attention_mask=snake_case__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
snake_case : List[Any] = jit(model.generate )
snake_case : str = jit_generate(snake_case__ , attention_mask=snake_case__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE (self : int ) -> Any:
'''simple docstring'''
snake_case : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" )
snake_case : List[str] = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
snake_case : Any = "Hello world"
snake_case : str = tokenizer(snake_case__ , return_tensors="np" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError , "do_samples" ):
            model.generate(snake_case__ , do_samples=True )
# arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError , "foo" ):
snake_case : Optional[Any] = {"foo": "bar"}
model.generate(snake_case__ , **snake_case__ )
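# --- Illustrative sketch of the eager-vs-jit comparison used throughout the tests above ---
# jit compilation must be a pure optimization: the compiled call has to produce
# exactly the same token ids as the eager call. A toy, self-contained analogue:
import jax
import jax.numpy as jnp

def greedy_pick(logits):
    # greedy decoding picks the argmax token per row
    return jnp.argmax(logits, axis=-1)

toy_logits = jnp.array([[0.1, 2.0, 0.3], [1.5, 0.2, 0.9]])
assert greedy_pick(toy_logits).tolist() == jax.jit(greedy_pick)(toy_logits).tolist()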
| 59 |
def max_product_subarray(numbers):
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError('''numbers must be an iterable of integers''')
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
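# --- Illustrative checks for max_product_subarray above ---
assert max_product_subarray([2, 3, -2, 4]) == 6    # best contiguous run is 2 * 3
assert max_product_subarray([-2, 0, -1]) == 0      # 0 beats any negative product
assert max_product_subarray([-2, -3, 4]) == 24     # two negatives multiply back positive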
| 278 | 0 |
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Swap array[index1] and array[index2] if they are out of order for the given direction."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge a bitonic sequence into sorted order in the given direction."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort array[low:low+length] (length must be a power of two): build a bitonic sequence, then merge it."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
UpperCamelCase = input('''Enter numbers separated by a comma:\n''').strip()
UpperCamelCase = [int(item.strip()) for item in user_input.split(''',''')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('''\nSorted array in ascending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('''Sorted array in descending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
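# --- Illustrative check (bitonic sort assumes a power-of-two length) ---
example = [12, 42, -21, 1, 3, 0, 99, 7]        # 8 == 2**3 elements
bitonic_sort(example, 0, len(example), 1)      # direction 1 = ascending
assert example == sorted(example)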
| 358 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file , repo_path="""shi-labs/oneformer_demo""" ):
    """simple docstring"""
    with open(hf_hub_download(repo_path , class_info_file , repo_type="""dataset""" ) ,"""r""" ) as f:
        class_info = json.load(f )
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["""name"""]
        class_names.append(info["""name"""] )
        if info["isthing"]:
            thing_ids.append(int(key ) )
    metadata["""thing_ids"""] = thing_ids
    metadata["""class_names"""] = class_names
    return metadata
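# --- Illustrative shape of the metadata built above (hypothetical class file) ---
example_class_info = {"0": {"name": "wall", "isthing": 0}, "1": {"name": "person", "isthing": 1}}
# prepare_metadata would reduce such a file to:
# {"0": "wall", "1": "person", "thing_ids": [1], "class_names": ["wall", "person"]}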
class __UpperCAmelCase (unittest.TestCase ):
def __init__( self: List[Any] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Optional[Any]=7 , UpperCAmelCase_: Union[str, Any]=3 , UpperCAmelCase_: Optional[int]=30 , UpperCAmelCase_: List[str]=400 , UpperCAmelCase_: List[str]=None , UpperCAmelCase_: List[Any]=True , UpperCAmelCase_: Tuple=True , UpperCAmelCase_: Union[str, Any]=[0.5, 0.5, 0.5] , UpperCAmelCase_: int=[0.5, 0.5, 0.5] , UpperCAmelCase_: List[str]=10 , UpperCAmelCase_: Optional[int]=False , UpperCAmelCase_: Optional[int]=255 , UpperCAmelCase_: Tuple="shi-labs/oneformer_demo" , UpperCAmelCase_: Union[str, Any]="ade20k_panoptic.json" , UpperCAmelCase_: Union[str, Any]=10 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = min_resolution
_SCREAMING_SNAKE_CASE = max_resolution
_SCREAMING_SNAKE_CASE = do_resize
_SCREAMING_SNAKE_CASE = {"""shortest_edge""": 32, """longest_edge""": 1_333} if size is None else size
_SCREAMING_SNAKE_CASE = do_normalize
_SCREAMING_SNAKE_CASE = image_mean
_SCREAMING_SNAKE_CASE = image_std
_SCREAMING_SNAKE_CASE = class_info_file
_SCREAMING_SNAKE_CASE = prepare_metadata(UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = num_text
_SCREAMING_SNAKE_CASE = repo_path
# for the post_process_functions
_SCREAMING_SNAKE_CASE = 2
_SCREAMING_SNAKE_CASE = 10
_SCREAMING_SNAKE_CASE = 10
_SCREAMING_SNAKE_CASE = 3
_SCREAMING_SNAKE_CASE = 4
_SCREAMING_SNAKE_CASE = num_labels
_SCREAMING_SNAKE_CASE = do_reduce_labels
_SCREAMING_SNAKE_CASE = ignore_index
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def UpperCamelCase ( self: int , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: List[str]=False ):
'''simple docstring'''
if not batched:
_SCREAMING_SNAKE_CASE = image_inputs[0]
if isinstance(UpperCAmelCase_ , Image.Image ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = image.size
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = image.shape[1], image.shape[2]
if w < h:
_SCREAMING_SNAKE_CASE = int(self.size["""shortest_edge"""] * h / w )
_SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""]
elif w > h:
_SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""]
_SCREAMING_SNAKE_CASE = int(self.size["""shortest_edge"""] * w / h )
else:
_SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""]
_SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""]
else:
_SCREAMING_SNAKE_CASE = []
for image in image_inputs:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_SCREAMING_SNAKE_CASE = max(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : item[0] )[0]
_SCREAMING_SNAKE_CASE = max(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : item[1] )[1]
return expected_height, expected_width
def UpperCamelCase ( self: Any ):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class __UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
__snake_case : Union[str, Any] = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
__snake_case : int = image_processing_class
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = OneFormerImageProcessorTester(self )
@property
def UpperCamelCase ( self: int ):
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , """image_mean""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """image_std""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """size""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """ignore_index""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """class_info_file""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """num_text""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """repo_path""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """metadata""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """do_reduce_labels""" ) )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processing_tester.get_expected_values(UpperCAmelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processing_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = image_processor(
UpperCAmelCase_ , ["""semantic"""] * len(UpperCAmelCase_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processing_tester.get_expected_values(UpperCAmelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processing_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = image_processor(
UpperCAmelCase_ , ["""semantic"""] * len(UpperCAmelCase_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processing_tester.get_expected_values(UpperCAmelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processing_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = image_processor(
UpperCAmelCase_ , ["""semantic"""] * len(UpperCAmelCase_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: Tuple=False , UpperCAmelCase_: Any=False , UpperCAmelCase_: str="np" ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
_SCREAMING_SNAKE_CASE = self.image_processing_tester.num_labels
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCAmelCase_ )
if with_segmentation_maps:
_SCREAMING_SNAKE_CASE = num_labels
if is_instance_map:
_SCREAMING_SNAKE_CASE = list(range(UpperCAmelCase_ ) ) * 2
_SCREAMING_SNAKE_CASE = dict(enumerate(UpperCAmelCase_ ) )
_SCREAMING_SNAKE_CASE = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
_SCREAMING_SNAKE_CASE = [Image.fromarray(UpperCAmelCase_ ) for annotation in annotations]
_SCREAMING_SNAKE_CASE = image_processor(
UpperCAmelCase_ , ["""semantic"""] * len(UpperCAmelCase_ ) , UpperCAmelCase_ , return_tensors="""pt""" , instance_id_to_semantic_id=UpperCAmelCase_ , pad_and_return_pixel_mask=UpperCAmelCase_ , )
return inputs
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
pass
def UpperCamelCase ( self: Any ):
'''simple docstring'''
def common(UpperCAmelCase_: List[str]=False , UpperCAmelCase_: Optional[int]=None ):
_SCREAMING_SNAKE_CASE = self.comm_get_image_processor_inputs(
with_segmentation_maps=UpperCAmelCase_ , is_instance_map=UpperCAmelCase_ , segmentation_type=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = inputs["""mask_labels"""]
_SCREAMING_SNAKE_CASE = inputs["""class_labels"""]
_SCREAMING_SNAKE_CASE = inputs["""pixel_values"""]
_SCREAMING_SNAKE_CASE = inputs["""text_inputs"""]
# check the batch_size
for mask_label, class_label, text_input in zip(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(UpperCAmelCase_ ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=UpperCAmelCase_ )
common(is_instance_map=UpperCAmelCase_ , segmentation_type="""pil""" )
common(is_instance_map=UpperCAmelCase_ , segmentation_type="""pil""" )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
        fake_binary_mask = np.zeros((20, 50) )
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1
        rle = binary_mask_to_rle(fake_binary_mask )
        self.assertEqual(len(rle ) , 4 )
        self.assertEqual(rle[0] , 21 )
        self.assertEqual(rle[1] , 45 )
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
_SCREAMING_SNAKE_CASE = self.image_processing_tester.get_fake_oneformer_outputs()
        _SCREAMING_SNAKE_CASE = image_processor.post_process_semantic_segmentation(UpperCAmelCase_ )
self.assertEqual(len(UpperCAmelCase_ ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
_SCREAMING_SNAKE_CASE = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        _SCREAMING_SNAKE_CASE = image_processor.post_process_semantic_segmentation(UpperCAmelCase_ , target_sizes=UpperCAmelCase_ )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
_SCREAMING_SNAKE_CASE = self.image_processing_tester.get_fake_oneformer_outputs()
_SCREAMING_SNAKE_CASE = image_processor.post_process_instance_segmentation(UpperCAmelCase_ , threshold=0 )
self.assertTrue(len(UpperCAmelCase_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , UpperCAmelCase_ )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
_SCREAMING_SNAKE_CASE = self.image_processing_tester.get_fake_oneformer_outputs()
_SCREAMING_SNAKE_CASE = image_processor.post_process_panoptic_segmentation(UpperCAmelCase_ , threshold=0 )
self.assertTrue(len(UpperCAmelCase_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , UpperCAmelCase_ )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 125 | 0 |
def __lowercase ( _A ) -> Dict:
SCREAMING_SNAKE_CASE : Optional[int] = 0
while len(lowerCAmelCase__ ) > 1:
SCREAMING_SNAKE_CASE : str = 0
# Consider two files with minimum cost to be merged
for _ in range(2 ):
SCREAMING_SNAKE_CASE : Optional[int] = files.index(min(lowerCAmelCase__ ) )
temp += files[min_index]
files.pop(lowerCAmelCase__ )
files.append(lowerCAmelCase__ )
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
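# --- Design note (illustrative) ---
# The index/min scan above is O(n^2); the same optimal merge cost can be computed
# in O(n log n) with a min-heap, exactly as in Huffman coding. A sketch:
import heapq

def optimal_merge_cost_heap(files):
    heap = list(files)
    heapq.heapify(heap)
    total = 0
    while len(heap) > 1:
        merged = heapq.heappop(heap) + heapq.heappop(heap)
        total += merged
        heapq.heappush(heap, merged)
    return total

assert optimal_merge_cost_heap([2, 3, 4]) == 14    # (2+3)=5, then (5+4)=9; 5+9=14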
| 245 |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
a : Optional[Any] = logging.getLogger(__name__)
require_version('pytorch_lightning>=1.0.4')
a : List[str] = {
'base': AutoModel,
'sequence-classification': AutoModelForSequenceClassification,
'question-answering': AutoModelForQuestionAnswering,
'pretraining': AutoModelForPreTraining,
'token-classification': AutoModelForTokenClassification,
'language-modeling': AutoModelWithLMHead,
'summarization': AutoModelForSeqaSeqLM,
'translation': AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
a : Any = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
a : str = sorted(arg_to_scheduler.keys())
a : Any = '{' + ', '.join(arg_to_scheduler_choices) + '}'
class _a ( pl.LightningModule ):
def __init__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_="base", SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_, ) -> Union[str, Any]:
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[Any] = 0
UpperCAmelCase_: Any = Path(self.hparams.output_dir )
UpperCAmelCase_: Dict = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
UpperCAmelCase_: str = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path, **({"""num_labels""": num_labels} if num_labels is not None else {}), cache_dir=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
else:
UpperCAmelCase_: PretrainedConfig = config
UpperCAmelCase_: Union[str, Any] = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(self.hparams, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
assert hasattr(self.config, SCREAMING_SNAKE_CASE_ ), f'model config doesn\'t have a `{p}` attribute'
setattr(self.config, SCREAMING_SNAKE_CASE_, getattr(self.hparams, SCREAMING_SNAKE_CASE_ ) )
if tokenizer is None:
UpperCAmelCase_: List[Any] = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path, cache_dir=SCREAMING_SNAKE_CASE_, )
else:
UpperCAmelCase_: PreTrainedTokenizer = tokenizer
UpperCAmelCase_: List[Any] = MODEL_MODES[mode]
if model is None:
UpperCAmelCase_: Any = self.model_type.from_pretrained(
self.hparams.model_name_or_path, from_tf=bool(""".ckpt""" in self.hparams.model_name_or_path ), config=self.config, cache_dir=SCREAMING_SNAKE_CASE_, )
else:
UpperCAmelCase_: Optional[Any] = model
def __snake_case (self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
UpperCAmelCase_: Any = self.model_type.from_pretrained(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> int:
UpperCAmelCase_: Dict = arg_to_scheduler[self.hparams.lr_scheduler]
UpperCAmelCase_: Optional[Any] = get_schedule_func(
self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps() )
UpperCAmelCase_: Dict = {"""scheduler""": scheduler, """interval""": """step""", """frequency""": 1}
return scheduler
def __snake_case (self ) -> Union[str, Any]:
UpperCAmelCase_: str = self.model
UpperCAmelCase_: str = ["""bias""", """LayerNorm.weight"""]
UpperCAmelCase_: str = [
{
"""params""": [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check this named paramters
"""weight_decay""": self.hparams.weight_decay,
},
{
"""params""": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
if self.hparams.adafactor:
UpperCAmelCase_: List[str] = Adafactor(
SCREAMING_SNAKE_CASE_, lr=self.hparams.learning_rate, scale_parameter=SCREAMING_SNAKE_CASE_, relative_step=SCREAMING_SNAKE_CASE_ )
else:
UpperCAmelCase_: Union[str, Any] = AdamW(
SCREAMING_SNAKE_CASE_, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon )
UpperCAmelCase_: Optional[int] = optimizer
UpperCAmelCase_: int = self.get_lr_scheduler()
return [optimizer], [scheduler]
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict:
return self.validation_step(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> Dict:
return self.validation_end(SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> int:
UpperCAmelCase_: Tuple = max(1, self.hparams.gpus ) # TODO: consider num_tpu_cores
UpperCAmelCase_: int = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
if stage == "test":
UpperCAmelCase_: int = len(self.test_dataloader().dataset )
else:
UpperCAmelCase_: Dict = self.get_dataloader("""train""", self.hparams.train_batch_size, shuffle=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[str] = len(self.train_dataloader().dataset )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = False ) -> str:
raise NotImplementedError("""You must implement this for your task""" )
def __snake_case (self ) -> List[str]:
return self.train_loader
def __snake_case (self ) -> int:
return self.get_dataloader("""dev""", self.hparams.eval_batch_size, shuffle=SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Any:
return self.get_dataloader("""test""", self.hparams.eval_batch_size, shuffle=SCREAMING_SNAKE_CASE_ )
def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> Dict:
return os.path.join(
self.hparams.data_dir, """cached_{}_{}_{}""".format(
SCREAMING_SNAKE_CASE_, list(filter(SCREAMING_SNAKE_CASE_, self.hparams.model_name_or_path.split("""/""" ) ) ).pop(), str(self.hparams.max_seq_length ), ), )
@pl.utilities.rank_zero_only
def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> None:
UpperCAmelCase_: List[str] = self.output_dir.joinpath("""best_tfmr""" )
UpperCAmelCase_: List[Any] = self.step_count
self.model.save_pretrained(SCREAMING_SNAKE_CASE_ )
self.tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
@staticmethod
def __snake_case (SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
parser.add_argument(
"""--model_name_or_path""", default=SCREAMING_SNAKE_CASE_, type=SCREAMING_SNAKE_CASE_, required=SCREAMING_SNAKE_CASE_, help="""Path to pretrained model or model identifier from huggingface.co/models""", )
parser.add_argument(
"""--config_name""", default="""""", type=SCREAMING_SNAKE_CASE_, help="""Pretrained config name or path if not the same as model_name""" )
parser.add_argument(
"""--tokenizer_name""", default=SCREAMING_SNAKE_CASE_, type=SCREAMING_SNAKE_CASE_, help="""Pretrained tokenizer name or path if not the same as model_name""", )
parser.add_argument(
"""--cache_dir""", default=str(Path(SCREAMING_SNAKE_CASE_ ).parent / """test_run""" / """cache""" ), type=SCREAMING_SNAKE_CASE_, help="""Where do you want to store the pre-trained models downloaded from huggingface.co""", )
parser.add_argument(
"""--encoder_layerdrop""", type=SCREAMING_SNAKE_CASE_, help="""Encoder layer dropout probability (Optional). Goes into model.config""", )
parser.add_argument(
"""--decoder_layerdrop""", type=SCREAMING_SNAKE_CASE_, help="""Decoder layer dropout probability (Optional). Goes into model.config""", )
parser.add_argument(
"""--dropout""", type=SCREAMING_SNAKE_CASE_, help="""Dropout probability (Optional). Goes into model.config""", )
parser.add_argument(
"""--attention_dropout""", type=SCREAMING_SNAKE_CASE_, help="""Attention dropout probability (Optional). Goes into model.config""", )
parser.add_argument("""--learning_rate""", default=5E-5, type=SCREAMING_SNAKE_CASE_, help="""The initial learning rate for Adam.""" )
parser.add_argument(
"""--lr_scheduler""", default="""linear""", choices=SCREAMING_SNAKE_CASE_, metavar=SCREAMING_SNAKE_CASE_, type=SCREAMING_SNAKE_CASE_, help="""Learning rate scheduler""", )
parser.add_argument("""--weight_decay""", default=0.0, type=SCREAMING_SNAKE_CASE_, help="""Weight decay if we apply some.""" )
parser.add_argument("""--adam_epsilon""", default=1E-8, type=SCREAMING_SNAKE_CASE_, help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--warmup_steps""", default=0, type=SCREAMING_SNAKE_CASE_, help="""Linear warmup over warmup_steps.""" )
parser.add_argument("""--num_workers""", default=4, type=SCREAMING_SNAKE_CASE_, help="""kwarg passed to DataLoader""" )
parser.add_argument("""--num_train_epochs""", dest="""max_epochs""", default=3, type=SCREAMING_SNAKE_CASE_ )
parser.add_argument("""--train_batch_size""", default=32, type=SCREAMING_SNAKE_CASE_ )
parser.add_argument("""--eval_batch_size""", default=32, type=SCREAMING_SNAKE_CASE_ )
parser.add_argument("""--adafactor""", action="""store_true""" )
class _a ( pl.Callback ):
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[str]:
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class _a ( pl.Callback ):
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Any:
# print(pl_module.model.rag)
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(SCREAMING_SNAKE_CASE_ )
class _a ( pl.Callback ):
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> int:
UpperCAmelCase_: Optional[Any] = trainer.lr_schedulers[0]["""scheduler"""]
UpperCAmelCase_: Optional[int] = {f'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(SCREAMING_SNAKE_CASE_ )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
rank_zero_info("""***** Validation results *****""" )
UpperCAmelCase_: int = trainer.callback_metrics
# Log results
for key in sorted(SCREAMING_SNAKE_CASE_ ):
if key not in ["log", "progress_bar"]:
rank_zero_info("""{} = {}\n""".format(SCREAMING_SNAKE_CASE_, str(metrics[key] ) ) )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
rank_zero_info("""***** Test results *****""" )
UpperCAmelCase_: Any = trainer.callback_metrics
# Log and save results to file
UpperCAmelCase_: List[Any] = os.path.join(pl_module.hparams.output_dir, """test_results.txt""" )
with open(SCREAMING_SNAKE_CASE_, """w""" ) as writer:
for key in sorted(SCREAMING_SNAKE_CASE_ ):
if key not in ["log", "progress_bar"]:
rank_zero_info("""{} = {}\n""".format(SCREAMING_SNAKE_CASE_, str(metrics[key] ) ) )
writer.write("""{} = {}\n""".format(SCREAMING_SNAKE_CASE_, str(metrics[key] ) ) )
def add_generic_args(parser: argparse.ArgumentParser , root_dir: str ) -> None:
    """simple docstring"""
    parser.add_argument(
        """--output_dir""" , default=str(Path(root_dir ).parent / """test_run""" / """model_checkpoints""" ) , type=str , help="""The output directory where the model predictions and checkpoints will be written.""" , )
    parser.add_argument(
        """--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
    parser.add_argument(
        """--fp16_opt_level""" , type=str , default="""O2""" , help=(
            """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
            """See details at https://nvidia.github.io/apex/amp.html"""
        ) , )
    parser.add_argument("""--n_tpu_cores""" , dest="""tpu_cores""" , type=int )
    parser.add_argument("""--max_grad_norm""" , dest="""gradient_clip_val""" , default=1.0 , type=float , help="""Max gradient norm""" )
    parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" )
    parser.add_argument("""--do_predict""" , action="""store_true""" , help="""Whether to run predictions on the test set.""" )
    parser.add_argument(
        """--gradient_accumulation_steps""" , dest="""accumulate_grad_batches""" , type=int , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , )
    parser.add_argument("""--seed""" , type=int , default=4_2 , help="""random seed for initialization""" )
    parser.add_argument(
        """--data_dir""" , default=str(Path(root_dir ).parent / """test_run""" / """dummy-train-data""" ) , type=str , help="""The input data dir. Should contain the training files for the CoNLL-2003 NER task.""" , )
def generic_train(model: BaseTransformer , args: argparse.Namespace , early_stopping_callback=None , logger=True , extra_callbacks=[] , checkpoint_callback=None , logging_callback=None , **extra_train_kwargs , ):
    """simple docstring"""
    pl.seed_everything(args.seed )
    # init model
    odir = Path(model.hparams.output_dir )
    odir.mkdir(exist_ok=True )
    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir , prefix="""checkpoint""" , monitor="""val_loss""" , mode="""min""" , save_top_k=1 )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback )
    if logging_callback is None:
        logging_callback = LoggingCallback()
    train_params = {}
    if args.fp16:
        train_params["""precision"""] = 1_6
if args.gpus > 1:
UpperCAmelCase_: str = """auto"""
UpperCAmelCase_: Union[str, Any] = """ddp"""
UpperCAmelCase_: Tuple = args.accumulate_grad_batches
UpperCAmelCase_: Optional[int] = None
UpperCAmelCase_: List[Any] = """auto"""
    trainer = pl.Trainer.from_argparse_args(
        args , weights_summary=None , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=logger , val_check_interval=1 , num_sanity_val_steps=2 , **train_params , )
    if args.do_train:
        trainer.fit(model )
    else:
        print("""RAG modeling tests with new set functions successfully executed!""" )
    return trainer
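# --- Illustrative sketch of the warmup-then-decay shape shared by the schedulers above ---
# A dependency-free view of the LR multiplier that get_linear_schedule_with_warmup
# applies (same formula family; the constants below are hypothetical):
def linear_warmup_multiplier(step, num_warmup_steps, num_training_steps):
    if step < num_warmup_steps:
        return step / max(1, num_warmup_steps)
    return max(0.0, (num_training_steps - step) / max(1, num_training_steps - num_warmup_steps))

assert linear_warmup_multiplier(0, 10, 100) == 0.0     # start of warmup
assert linear_warmup_multiplier(10, 10, 100) == 1.0    # warmup done, full LR
assert linear_warmup_multiplier(100, 10, 100) == 0.0   # fully decayed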
| 147 | 0 |
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class _lowercase ( __a ):
"""simple docstring"""
lowercase__ = 42
lowercase__ = None
def betas_for_alpha_bar(num_diffusion_timesteps , max_beta=0.9_9_9 , alpha_transform_type="cosine" , ):
    """Discretize an alpha_bar(t) curve into per-step betas via beta_i = 1 - alpha_bar(t2) / alpha_bar(t1)."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t ):
            return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t ):
            return math.exp(t * -1_2.0 )

    else:
        raise ValueError(F"""Unsupported alpha_transform_type: {alpha_transform_type}""" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
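# --- Illustrative check of betas_for_alpha_bar (a sketch) ---
# The cosine schedule discretizes alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2
# into per-step betas, clipped at max_beta:
example_betas = betas_for_alpha_bar(1000)
assert example_betas.shape == (1000,)
assert 0.0 < float(example_betas.min()) and float(example_betas.max()) <= 0.999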
class _lowercase ( __a , __a ):
"""simple docstring"""
@register_to_config
def __init__( self : Optional[Any] , UpperCamelCase__ : int = 1000 , UpperCamelCase__ : str = "fixed_small_log" , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[float] = 1.0 , UpperCamelCase__ : str = "epsilon" , UpperCamelCase__ : str = "squaredcos_cap_v2" , ) -> List[str]:
'''simple docstring'''
if beta_schedule != "squaredcos_cap_v2":
raise ValueError('''UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'''' )
__UpperCamelCase =betas_for_alpha_bar(UpperCamelCase__ )
__UpperCamelCase =1.0 - self.betas
__UpperCamelCase =torch.cumprod(self.alphas , dim=0 )
__UpperCamelCase =torch.tensor(1.0 )
# standard deviation of the initial noise distribution
__UpperCamelCase =1.0
# setable values
__UpperCamelCase =None
__UpperCamelCase =torch.from_numpy(np.arange(0 , UpperCamelCase__ )[::-1].copy() )
__UpperCamelCase =variance_type
def UpperCAmelCase_ ( self : Union[str, Any] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[int] = None ) -> torch.FloatTensor:
'''simple docstring'''
return sample
def UpperCAmelCase_ ( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, torch.device] = None ) -> List[str]:
'''simple docstring'''
__UpperCamelCase =num_inference_steps
__UpperCamelCase =(self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
__UpperCamelCase =(np.arange(0 , UpperCamelCase__ ) * step_ratio).round()[::-1].copy().astype(np.intaa )
__UpperCamelCase =torch.from_numpy(UpperCamelCase__ ).to(UpperCamelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int=None , UpperCamelCase__ : str=None , UpperCamelCase__ : Tuple=None ) -> List[str]:
'''simple docstring'''
if prev_timestep is None:
__UpperCamelCase =t - 1
__UpperCamelCase =self.alphas_cumprod[t]
__UpperCamelCase =self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
__UpperCamelCase =1 - alpha_prod_t
__UpperCamelCase =1 - alpha_prod_t_prev
if prev_timestep == t - 1:
__UpperCamelCase =self.betas[t]
else:
__UpperCamelCase =1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
__UpperCamelCase =beta_prod_t_prev / beta_prod_t * beta
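        # i.e. the DDPM posterior variance beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t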
if variance_type is None:
__UpperCamelCase =self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
__UpperCamelCase =torch.log(torch.clamp(UpperCamelCase__ , min=1E-20 ) )
__UpperCamelCase =torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
__UpperCamelCase =variance.log()
__UpperCamelCase =beta.log()
__UpperCamelCase =(predicted_variance + 1) / 2
__UpperCamelCase =frac * max_log + (1 - frac) * min_log
return variance
def UpperCAmelCase_ ( self : Optional[int] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : bool = True , ) -> Union[UnCLIPSchedulerOutput, Tuple]:
'''simple docstring'''
__UpperCamelCase =timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
__UpperCamelCase , __UpperCamelCase =torch.split(UpperCamelCase__ , sample.shape[1] , dim=1 )
else:
__UpperCamelCase =None
# 1. compute alphas, betas
if prev_timestep is None:
__UpperCamelCase =t - 1
__UpperCamelCase =self.alphas_cumprod[t]
__UpperCamelCase =self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
__UpperCamelCase =1 - alpha_prod_t
__UpperCamelCase =1 - alpha_prod_t_prev
if prev_timestep == t - 1:
__UpperCamelCase =self.betas[t]
__UpperCamelCase =self.alphas[t]
else:
__UpperCamelCase =1 - alpha_prod_t / alpha_prod_t_prev
__UpperCamelCase =1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
__UpperCamelCase =(sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
__UpperCamelCase =model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
''' for the UnCLIPScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
__UpperCamelCase =torch.clamp(
UpperCamelCase__ , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__UpperCamelCase =(alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
__UpperCamelCase =alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__UpperCamelCase =pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
__UpperCamelCase =0
if t > 0:
__UpperCamelCase =randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=UpperCamelCase__ , device=model_output.device )
__UpperCamelCase =self._get_variance(
UpperCamelCase__ , predicted_variance=UpperCamelCase__ , prev_timestep=UpperCamelCase__ , )
if self.variance_type == "fixed_small_log":
__UpperCamelCase =variance
elif self.variance_type == "learned_range":
__UpperCamelCase =(0.5 * variance).exp()
else:
raise ValueError(
f"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
''' for the UnCLIPScheduler.''' )
__UpperCamelCase =variance * variance_noise
__UpperCamelCase =pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=UpperCamelCase__ , pred_original_sample=UpperCamelCase__ )
def UpperCAmelCase_ ( self : Dict , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.IntTensor , ) -> torch.FloatTensor:
'''simple docstring'''
__UpperCamelCase =self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
__UpperCamelCase =timesteps.to(original_samples.device )
__UpperCamelCase =alphas_cumprod[timesteps] ** 0.5
__UpperCamelCase =sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
__UpperCamelCase =sqrt_alpha_prod.unsqueeze(-1 )
__UpperCamelCase =(1 - alphas_cumprod[timesteps]) ** 0.5
__UpperCamelCase =sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
__UpperCamelCase =sqrt_one_minus_alpha_prod.unsqueeze(-1 )
__UpperCamelCase =sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
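# --- Illustrative sketch of the add_noise closed form above ---
# add_noise implements x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise,
# the standard DDPM forward process. A toy numeric check with made-up values:
import torch

toy_alpha_bar = torch.tensor(0.9)
toy_x0 = torch.tensor([1.0, -2.0])
toy_noise = torch.tensor([0.5, 0.5])
toy_xt = toy_alpha_bar.sqrt() * toy_x0 + (1 - toy_alpha_bar).sqrt() * toy_noise
assert torch.allclose(toy_xt, 0.9 ** 0.5 * toy_x0 + 0.1 ** 0.5 * toy_noise)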
| 361 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
__lowercase = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class _lowercase ( __a ):
"""simple docstring"""
lowercase__ = '''albert'''
def __init__( self : List[Any] , UpperCamelCase__ : List[Any]=30000 , UpperCamelCase__ : int=128 , UpperCamelCase__ : str=4096 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : Dict=1 , UpperCamelCase__ : Union[str, Any]=64 , UpperCamelCase__ : Any=16384 , UpperCamelCase__ : Any=1 , UpperCamelCase__ : Optional[int]="gelu_new" , UpperCamelCase__ : int=0 , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : Dict=512 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : str=0.02 , UpperCamelCase__ : Tuple=1E-12 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Dict="absolute" , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : int=2 , UpperCamelCase__ : Optional[Any]=3 , **UpperCamelCase__ : List[str] , ) -> Dict:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
__UpperCamelCase =vocab_size
__UpperCamelCase =embedding_size
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_hidden_groups
__UpperCamelCase =num_attention_heads
__UpperCamelCase =inner_group_num
__UpperCamelCase =hidden_act
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =type_vocab_size
__UpperCamelCase =initializer_range
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =classifier_dropout_prob
__UpperCamelCase =position_embedding_type
class _lowercase ( __a ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
__UpperCamelCase ={0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__UpperCamelCase ={0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 85 | 0 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
SCREAMING_SNAKE_CASE__ : Dict = logging.getLogger(__name__)
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Optional[int] = """sequence-classification"""
def __init__( self , UpperCamelCase__ ) -> List[Any]:
if type(UpperCamelCase__ ) == dict:
lowerCamelCase : int = Namespace(**UpperCamelCase__ )
lowerCamelCase : str = glue_output_modes[hparams.task]
lowerCamelCase : int = glue_tasks_num_labels[hparams.task]
super().__init__(UpperCamelCase__ , UpperCamelCase__ , self.mode )
def _lowercase ( self , **UpperCamelCase__ ) -> Tuple:
return self.model(**UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
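        # Each batch is the tuple built by get_dataloader below:
        # (input_ids, attention_mask, token_type_ids, labels). token_type_ids
        # (batch[2]) are only forwarded to model types that actually use them.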
lowerCamelCase : Union[str, Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowerCamelCase : List[str] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
lowerCamelCase : Optional[int] = self(**UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = outputs[0]
lowerCamelCase : str = self.trainer.lr_schedulers[0]["scheduler"]
lowerCamelCase : Optional[int] = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def _lowercase ( self ) -> str:
lowerCamelCase : Any = self.hparams
lowerCamelCase : Union[str, Any] = processors[args.task]()
lowerCamelCase : Optional[int] = processor.get_labels()
for mode in ["train", "dev"]:
lowerCamelCase : Optional[Any] = self._feature_file(UpperCamelCase__ )
if os.path.exists(UpperCamelCase__ ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , UpperCamelCase__ )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
lowerCamelCase : List[str] = (
processor.get_dev_examples(args.data_dir )
if mode == "dev"
else processor.get_train_examples(args.data_dir )
)
lowerCamelCase : Dict = convert_examples_to_features(
UpperCamelCase__ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("Saving features into cached file %s" , UpperCamelCase__ )
torch.save(UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False ) -> DataLoader:
lowerCamelCase : str = "dev" if mode == "test" else mode
lowerCamelCase : int = self._feature_file(UpperCamelCase__ )
logger.info("Loading features from cached file %s" , UpperCamelCase__ )
lowerCamelCase : str = torch.load(UpperCamelCase__ )
lowerCamelCase : List[str] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowerCamelCase : str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
lowerCamelCase : List[str] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
lowerCamelCase : Any = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
lowerCamelCase : Union[str, Any] = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , batch_size=UpperCamelCase__ , shuffle=UpperCamelCase__ , )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
lowerCamelCase : Dict = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowerCamelCase : Tuple = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
lowerCamelCase : Dict = self(**UpperCamelCase__ )
lowerCamelCase , lowerCamelCase : Any = outputs[:2]
lowerCamelCase : Union[str, Any] = logits.detach().cpu().numpy()
lowerCamelCase : Optional[Any] = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _lowercase ( self , UpperCamelCase__ ) -> tuple:
lowerCamelCase : Union[str, Any] = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item()
lowerCamelCase : Optional[int] = np.concatenate([x["pred"] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
lowerCamelCase : Union[str, Any] = np.argmax(UpperCamelCase__ , axis=1 )
elif self.hparams.glue_output_mode == "regression":
lowerCamelCase : str = np.squeeze(UpperCamelCase__ )
lowerCamelCase : List[Any] = np.concatenate([x["target"] for x in outputs] , axis=0 )
lowerCamelCase : List[str] = [[] for _ in range(out_label_ids.shape[0] )]
lowerCamelCase : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
lowerCamelCase : Dict = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , UpperCamelCase__ , UpperCamelCase__ )}
lowerCamelCase : List[str] = dict(results.items() )
lowerCamelCase : Optional[int] = results
return ret, preds_list, out_label_list
def _lowercase ( self , UpperCamelCase__ ) -> dict:
lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = self._eval_end(UpperCamelCase__ )
lowerCamelCase : str = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _lowercase ( self , UpperCamelCase__ ) -> dict:
lowerCamelCase , lowerCamelCase , lowerCamelCase : str = self._eval_end(UpperCamelCase__ )
lowerCamelCase : str = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _lowercase ( UpperCamelCase__ , UpperCamelCase__ ) -> int:
BaseTransformer.add_model_specific_args(UpperCamelCase__ , UpperCamelCase__ )
parser.add_argument(
"--max_seq_length" , default=128 , type=UpperCamelCase__ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--task" , default="" , type=UpperCamelCase__ , required=UpperCamelCase__ , help="The GLUE task to run" , )
parser.add_argument(
"--gpus" , default=0 , type=UpperCamelCase__ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
def A ( ) -> int:
lowerCamelCase : int = argparse.ArgumentParser()
add_generic_args(_SCREAMING_SNAKE_CASE ,os.getcwd() )
lowerCamelCase : str = GLUETransformer.add_model_specific_args(_SCREAMING_SNAKE_CASE ,os.getcwd() )
lowerCamelCase : str = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
lowerCamelCase : int = os.path.join(
"./results" ,f'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' ,)
os.makedirs(args.output_dir )
lowerCamelCase : int = GLUETransformer(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Dict = generic_train(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
lowerCamelCase : Optional[int] = sorted(glob.glob(os.path.join(args.output_dir ,"checkpoint-epoch=*.ckpt" ) ,recursive=_SCREAMING_SNAKE_CASE ) )
lowerCamelCase : Tuple = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 48 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Tuple = {'vocab_file': 'spiece.model'}
SCREAMING_SNAKE_CASE__ : int = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
SCREAMING_SNAKE_CASE__ : str = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
SCREAMING_SNAKE_CASE__ : Dict = 0
SCREAMING_SNAKE_CASE__ : Tuple = 1
SCREAMING_SNAKE_CASE__ : Optional[int] = 2
SCREAMING_SNAKE_CASE__ : List[str] = 3
SCREAMING_SNAKE_CASE__ : Optional[int] = 4
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Dict = VOCAB_FILES_NAMES
lowerCamelCase_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ : List[str] = """left"""
def __init__( self , UpperCamelCase__ , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__="<s>" , UpperCamelCase__="</s>" , UpperCamelCase__="<unk>" , UpperCamelCase__="<sep>" , UpperCamelCase__="<pad>" , UpperCamelCase__="<cls>" , UpperCamelCase__="<mask>" , UpperCamelCase__=["<eop>", "<eod>"] , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
        lowerCamelCase : str = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , str ) else mask_token
lowerCamelCase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
lowerCamelCase : Any = 3
lowerCamelCase : Optional[Any] = do_lower_case
lowerCamelCase : List[Any] = remove_space
lowerCamelCase : str = keep_accents
lowerCamelCase : List[Any] = vocab_file
lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase__ )
@property
def _lowercase ( self ) -> Optional[Any]:
return len(self.sp_model )
def _lowercase ( self ) -> Optional[int]:
lowerCamelCase : int = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[Any]:
lowerCamelCase : Optional[int] = self.__dict__.copy()
lowerCamelCase : Union[str, Any] = None
return state
def __setstate__( self , UpperCamelCase__ ) -> int:
lowerCamelCase : int = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCamelCase : Any = {}
lowerCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowercase ( self , UpperCamelCase__ ) -> Any:
if self.remove_space:
lowerCamelCase : Dict = " ".join(inputs.strip().split() )
else:
lowerCamelCase : Union[str, Any] = inputs
lowerCamelCase : Optional[Any] = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
lowerCamelCase : Optional[int] = unicodedata.normalize("NFKD" , UpperCamelCase__ )
lowerCamelCase : List[Any] = "".join([c for c in outputs if not unicodedata.combining(UpperCamelCase__ )] )
if self.do_lower_case:
lowerCamelCase : List[str] = outputs.lower()
return outputs
def _lowercase ( self , UpperCamelCase__ ) -> List[str]:
lowerCamelCase : Optional[Any] = self.preprocess_text(UpperCamelCase__ )
lowerCamelCase : Dict = self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
lowerCamelCase : Dict = []
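        # SentencePiece can glue a trailing comma onto a number (e.g. "9,"); the loop
        # below re-encodes such pieces without the comma and appends the comma as its
        # own piece, following the original XLNet tokenization logic.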
for piece in pieces:
                if len(piece ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                    lowerCamelCase : List[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCamelCase : Union[str, Any] = cur_pieces[1:]
else:
lowerCamelCase : Optional[int] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
                    new_pieces.extend(cur_pieces )
                else:
                    new_pieces.append(piece )
return new_pieces
def _lowercase ( self , UpperCamelCase__ ) -> int:
return self.sp_model.PieceToId(UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ ) -> Tuple:
return self.sp_model.IdToPiece(UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ ) -> List[str]:
        lowerCamelCase : Union[str, Any] = "".join(UpperCamelCase__ ).replace(SPIECE_UNDERLINE , " " ).strip()
return out_string
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = True , **UpperCamelCase__ , ) -> str:
lowerCamelCase : Optional[int] = kwargs.pop("use_source_tokenizer" , UpperCamelCase__ )
lowerCamelCase : Optional[int] = self.convert_ids_to_tokens(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
        # To avoid mixing byte-level and unicode for byte-level BPE
        # we need to build the string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
lowerCamelCase : Any = []
lowerCamelCase : Any = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCamelCase__ ) )
lowerCamelCase : int = []
sub_texts.append(UpperCamelCase__ )
else:
current_sub_text.append(UpperCamelCase__ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCamelCase__ ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
lowerCamelCase : Union[str, Any] = "".join(UpperCamelCase__ )
lowerCamelCase : Tuple = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
lowerCamelCase : int = self.clean_up_tokenization(UpperCamelCase__ )
return clean_text
else:
return text
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
lowerCamelCase : str = [self.sep_token_id]
lowerCamelCase : Optional[int] = [self.cls_token_id]
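        # Unlike BERT, XLNet places the classification token at the END of the
        # sequence: A + [SEP] (+ B + [SEP]) + [CLS].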
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
if token_ids_a is not None:
return ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1, 1]
return ([0] * len(UpperCamelCase__ )) + [1, 1]
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
lowerCamelCase : Any = [self.sep_token_id]
lowerCamelCase : List[str] = [2]
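        # XLNet segment ids: 0 for the first sequence and its [SEP], 1 for the
        # second, and the special segment id 2 for the trailing [CLS].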
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
if not os.path.isdir(UpperCamelCase__ ):
            logger.error(F'''Vocabulary path ({UpperCamelCase__}) should be a directory''' )
return
lowerCamelCase : Union[str, Any] = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__ , "wb" ) as fi:
lowerCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (out_vocab_file,)
| 48 | 1 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
__snake_case : Union[str, Any] = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def _lowercase ( ) -> List[str]:
__lowerCAmelCase : Dict = Github(os.environ["GITHUB_TOKEN"] )
__lowerCAmelCase : List[Any] = g.get_repo("huggingface/diffusers" )
__lowerCAmelCase : str = repo.get_issues(state="open" )
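    # Stale-bot policy implemented below: close an issue 7 days after the stale
    # notice if nobody replied, un-stale it when a human comments, and post the
    # stale notice on issues inactive for more than 23 days (and at least 30 days
    # old), skipping anything with an exempt label.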
for issue in open_issues:
__lowerCAmelCase : Dict = sorted(issue.get_comments() ,key=lambda __snake_case : i.created_at ,reverse=__snake_case )
__lowerCAmelCase : Dict = comments[0] if len(__snake_case ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
main() | 58 |
"""simple docstring"""
from math import pi
def _lowercase ( __snake_case ,__snake_case ) -> float:
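    # Arc length of a circular arc: circumference times the swept fraction of a full
    # turn, i.e. 2 * pi * radius * (angle_in_degrees / 360); arc_length(90, 10) is
    # therefore 5 * pi, about 15.708.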
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10)) | 58 | 1 |
'''simple docstring'''
def snake_case ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 0 , UpperCAmelCase = 0 )-> int:
"""simple docstring"""
__A = right or len(UpperCAmelCase ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(UpperCAmelCase , UpperCAmelCase , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 161 |
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def snake_case ( UpperCAmelCase )-> Dict:
"""simple docstring"""
__A = torch.exp(UpperCAmelCase )
__A = torch.sum(UpperCAmelCase , dim=1 ) # sum of exp(x_i)
__A = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
return torch.log(UpperCAmelCase ) - B / A
class UpperCamelCase__ ( nn.Module):
def __init__( self :Any , _A :int ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
__A = config.output_attentions
__A = config.output_hidden_states
__A = nn.ModuleList([BertLayer(_A ) for _ in range(config.num_hidden_layers )] )
__A = nn.ModuleList([BertHighway(_A ) for _ in range(config.num_hidden_layers )] )
__A = [-1 for _ in range(config.num_hidden_layers )]
def lowercase_ ( self :Any , _A :List[Any] ) -> Tuple:
'''simple docstring'''
if (type(_A ) is float) or (type(_A ) is int):
for i in range(len(self.early_exit_entropy ) ):
__A = x
else:
__A = x
def lowercase_ ( self :Optional[Any] , _A :List[str] ) -> Dict:
'''simple docstring'''
__A = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def lowercase_ ( self :List[Any] , _A :Tuple , _A :Tuple=None , _A :int=None , _A :List[Any]=None , _A :str=None , ) -> Tuple:
'''simple docstring'''
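        # Early-exit ("highway") forward pass: after every encoder layer the matching
        # highway head computes logits, and at inference time, if the prediction
        # entropy falls below that layer's threshold, a HighwayException is raised to
        # stop computation early.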
__A = ()
__A = ()
__A = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
__A = all_hidden_states + (hidden_states,)
__A = layer_module(
_A , _A , head_mask[i] , _A , _A )
__A = layer_outputs[0]
if self.output_attentions:
__A = all_attentions + (layer_outputs[1],)
__A = (hidden_states,)
if self.output_hidden_states:
__A = current_outputs + (all_hidden_states,)
if self.output_attentions:
__A = current_outputs + (all_attentions,)
__A = self.highway[i](_A )
# logits, pooled_output
if not self.training:
__A = highway_exit[0]
__A = entropy(_A )
__A = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
__A = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
__A = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(_A , i + 1 )
else:
__A = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
__A = all_hidden_states + (hidden_states,)
__A = (hidden_states,)
if self.output_hidden_states:
__A = outputs + (all_hidden_states,)
if self.output_attentions:
__A = outputs + (all_attentions,)
__A = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
'The Bert Model transformer with early exiting (DeeBERT). ' , SCREAMING_SNAKE_CASE , )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE):
def __init__( self :Tuple , _A :List[str] ) -> str:
'''simple docstring'''
super().__init__(_A )
__A = config
__A = BertEmbeddings(_A )
__A = DeeBertEncoder(_A )
__A = BertPooler(_A )
self.init_weights()
def lowercase_ ( self :Union[str, Any] ) -> str:
'''simple docstring'''
self.encoder.init_highway_pooler(self.pooler )
def lowercase_ ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
return self.embeddings.word_embeddings
def lowercase_ ( self :Tuple , _A :Tuple ) -> Union[str, Any]:
'''simple docstring'''
__A = value
def lowercase_ ( self :int , _A :int ) -> Tuple:
'''simple docstring'''
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(_A )
@add_start_docstrings_to_model_forward(_A )
def lowercase_ ( self :Tuple , _A :int=None , _A :List[Any]=None , _A :Optional[int]=None , _A :Optional[int]=None , _A :Optional[int]=None , _A :Any=None , _A :List[str]=None , _A :Optional[Any]=None , ) -> Union[str, Any]:
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
__A = input_ids.size()
elif inputs_embeds is not None:
__A = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
__A = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__A = torch.ones(_A , device=_A )
if encoder_attention_mask is None:
__A = torch.ones(_A , device=_A )
if token_type_ids is None:
__A = torch.zeros(_A , dtype=torch.long , device=_A )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__A = self.get_extended_attention_mask(_A , _A , _A )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
__A = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
__A = encoder_attention_mask[:, None, None, :]
__A = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
__A = (1.0 - encoder_extended_attention_mask) * -10_000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__A = self.get_head_mask(_A , self.config.num_hidden_layers )
__A = self.embeddings(
input_ids=_A , position_ids=_A , token_type_ids=_A , inputs_embeds=_A )
__A = self.encoder(
_A , attention_mask=_A , head_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , )
__A = encoder_outputs[0]
__A = self.pooler(_A )
__A = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE):
def __init__( self :Optional[Any] , _A :str , _A :List[str] ) -> Optional[int]:
'''simple docstring'''
__A = message
__A = exit_layer # start from 1!
class UpperCamelCase__ ( nn.Module):
def __init__( self :Any , _A :Dict ) -> Tuple:
'''simple docstring'''
super().__init__()
__A = BertPooler(_A )
__A = nn.Dropout(config.hidden_dropout_prob )
__A = nn.Linear(config.hidden_size , config.num_labels )
def lowercase_ ( self :List[Any] , _A :Optional[Any] ) -> int:
'''simple docstring'''
__A = encoder_outputs[0]
__A = self.pooler(_A )
# "return" pooler_output
# BertModel
__A = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
__A = bmodel_output[1]
__A = self.dropout(_A )
__A = self.classifier(_A )
return logits, pooled_output
@add_start_docstrings(
'Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. ' , SCREAMING_SNAKE_CASE , )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE):
def __init__( self :str , _A :Optional[Any] ) -> str:
'''simple docstring'''
super().__init__(_A )
__A = config.num_labels
__A = config.num_hidden_layers
__A = DeeBertModel(_A )
__A = nn.Dropout(config.hidden_dropout_prob )
__A = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(_A )
def lowercase_ ( self :Tuple , _A :str=None , _A :Optional[int]=None , _A :Any=None , _A :str=None , _A :int=None , _A :Tuple=None , _A :Any=None , _A :List[str]=-1 , _A :Optional[Any]=False , ) -> List[str]:
'''simple docstring'''
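        # The try/except around the encoder catches the HighwayException raised by an
        # inference-time early exit; during training, losses are instead computed for
        # every highway head so each exit layer receives a gradient signal.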
__A = self.num_layers
try:
__A = self.bert(
_A , attention_mask=_A , token_type_ids=_A , position_ids=_A , head_mask=_A , inputs_embeds=_A , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
__A = outputs[1]
__A = self.dropout(_A )
__A = self.classifier(_A )
__A = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__A = e.message
__A = e.exit_layer
__A = outputs[0]
if not self.training:
__A = entropy(_A )
__A = []
__A = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__A = MSELoss()
__A = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
__A = CrossEntropyLoss()
__A = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
__A = []
for highway_exit in outputs[-1]:
__A = highway_exit[0]
if not self.training:
highway_logits_all.append(_A )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
__A = MSELoss()
__A = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
__A = CrossEntropyLoss()
__A = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_A )
if train_highway:
__A = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
__A = (loss,) + outputs
if not self.training:
__A = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__A = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 161 | 1 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class a__ ( __SCREAMING_SNAKE_CASE ):
@require_torch
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
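        # Test pattern used throughout this file: run small scripts in a child Python
        # process, in some tests with socket.socket monkey-patched to raise, after
        # first warming the local cache over the real network, and assert that
        # TRANSFORMERS_OFFLINE=1 lets from_pretrained succeed purely from that cache.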
__a = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
__a = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
__a = '''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
__a = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(UpperCamelCase__ )
BertModel.from_pretrained(UpperCamelCase__ )
BertTokenizer.from_pretrained(UpperCamelCase__ )
pipeline(task='fill-mask' , model=UpperCamelCase__ )
# baseline - just load from_pretrained with normal network
__a = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
__a = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
__a = '''1'''
__a = subprocess.run(UpperCamelCase__ , env=UpperCamelCase__ , check=UpperCamelCase__ , capture_output=UpperCamelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def __SCREAMING_SNAKE_CASE ( self ) -> str:
__a = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
__a = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
__a = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
__a = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(UpperCamelCase__ )
BertModel.from_pretrained(UpperCamelCase__ )
BertTokenizer.from_pretrained(UpperCamelCase__ )
pipeline(task='fill-mask' , model=UpperCamelCase__ )
# baseline - just load from_pretrained with normal network
__a = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
__a = self.get_env()
__a = subprocess.run(UpperCamelCase__ , env=UpperCamelCase__ , check=UpperCamelCase__ , capture_output=UpperCamelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
__a = '''
from transformers import BertConfig, BertModel, BertTokenizer
'''
__a = '''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
__a = '''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
# baseline - just load from_pretrained with normal network
__a = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
__a = self.get_env()
__a = subprocess.run(UpperCamelCase__ , env=UpperCamelCase__ , check=UpperCamelCase__ , capture_output=UpperCamelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# next emulate no network
__a = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
__a = '''1'''
__a = subprocess.run(UpperCamelCase__ , env=UpperCamelCase__ , check=UpperCamelCase__ , capture_output=UpperCamelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
__a = '''
from transformers import pipeline
'''
__a = '''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
__a = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
__a = self.get_env()
__a = '''1'''
__a = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
__a = subprocess.run(UpperCamelCase__ , env=UpperCamelCase__ , check=UpperCamelCase__ , capture_output=UpperCamelCase__ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' , result.stderr.decode().replace('\n' , '' ) , )
@require_torch
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
__a = '''
from transformers import AutoModel
'''
__a = '''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
# baseline - just load from_pretrained with normal network
__a = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
__a = self.get_env()
__a = subprocess.run(UpperCamelCase__ , env=UpperCamelCase__ , check=UpperCamelCase__ , capture_output=UpperCamelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
__a = '''1'''
__a = subprocess.run(UpperCamelCase__ , env=UpperCamelCase__ , check=UpperCamelCase__ , capture_output=UpperCamelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
| 367 | def lowerCAmelCase( __lowerCamelCase ):
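    # Binary-to-octal conversion: left-pad the bit string to a multiple of three,
    # then map each 3-bit group to one octal digit via 4*b2 + 2*b1 + b0
    # (e.g. "111100" -> "74").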
if not all(char in '01' for char in bin_string ):
raise ValueError('Non-binary value was passed to the function' )
if not bin_string:
raise ValueError('Empty string was passed to the function' )
__a = ''
while len(__lowerCamelCase ) % 3 != 0:
__a = '0' + bin_string
__a = [
bin_string[index : index + 3]
for index in range(len(__lowerCamelCase ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
__a = 0
for index, val in enumerate(__lowerCamelCase ):
oct_val += int(2 ** (2 - index) * int(__lowerCamelCase ) )
oct_string += str(__lowerCamelCase )
return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
| 197 | 0 |
'''simple docstring'''
from itertools import product
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> list[int]:
'''simple docstring'''
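    # Brute-force histogram of dice totals: itertools.product enumerates every
    # possible roll (max_face_number ** dice_number of them) and tallies the
    # frequency of each sum into a flat list indexed by total.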
_UpperCAmelCase : List[str] = sides_number
_UpperCAmelCase : int = max_face_number * dice_number
_UpperCAmelCase : List[Any] = [0] * (max_total + 1)
_UpperCAmelCase : int = 1
_UpperCAmelCase : Union[str, Any] = range(lowerCAmelCase_ , max_face_number + 1 )
for dice_numbers in product(lowerCAmelCase_ , repeat=lowerCAmelCase_ ):
_UpperCAmelCase : List[str] = sum(lowerCAmelCase_ )
totals_frequencies[total] += 1
return totals_frequencies
def snake_case_ ( )-> float:
'''simple docstring'''
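    # This appears to be Project Euler problem 205: Peter rolls nine 4-sided dice,
    # Colin six 6-sided dice. The win probability is found by counting, for each
    # Peter total, the Colin totals strictly below it, then dividing by the
    # 4**9 * 6**6 equally likely games.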
_UpperCAmelCase : Tuple = total_frequency_distribution(
sides_number=4 , dice_number=9 )
_UpperCAmelCase : Union[str, Any] = total_frequency_distribution(
sides_number=6 , dice_number=6 )
_UpperCAmelCase : Union[str, Any] = 0
_UpperCAmelCase : List[str] = 9
_UpperCAmelCase : str = 4 * 9
_UpperCAmelCase : Optional[int] = 6
for peter_total in range(lowerCAmelCase_ , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
_UpperCAmelCase : Optional[int] = (4**9) * (6**6)
_UpperCAmelCase : Optional[int] = peter_wins_count / total_games_number
_UpperCAmelCase : Optional[int] = round(lowerCAmelCase_ , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f"""{solution() = }""")
| 215 |
'''simple docstring'''
class lowercase :
"""simple docstring"""
def __init__( self ) -> List[str]:
_UpperCAmelCase : int = 0
_UpperCAmelCase : Union[str, Any] = 0
_UpperCAmelCase : Optional[int] = {}
def _snake_case ( self ,a_ ) -> Optional[Any]:
if vertex not in self.adjacency:
_UpperCAmelCase : int = {}
self.num_vertices += 1
def _snake_case ( self ,a_ ,a_ ,a_ ) -> int:
self.add_vertex(a_ )
self.add_vertex(a_ )
if head == tail:
return
_UpperCAmelCase : List[Any] = weight
_UpperCAmelCase : Dict = weight
def _snake_case ( self ) -> Dict:
_UpperCAmelCase : Optional[int] = self.get_edges()
for edge in edges:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Dict = edge
edges.remove((tail, head, weight) )
for i in range(len(a_ ) ):
_UpperCAmelCase : str = list(edges[i] )
edges.sort(key=lambda a_ : e[2] )
for i in range(len(a_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
_UpperCAmelCase : Optional[Any] = edges[i][2] + 1
for edge in edges:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Union[str, Any] = edge
_UpperCAmelCase : str = weight
_UpperCAmelCase : List[str] = weight
def __str__( self ) -> Any:
_UpperCAmelCase : List[Any] = """"""
for tail in self.adjacency:
for head in self.adjacency[tail]:
_UpperCAmelCase : List[str] = self.adjacency[head][tail]
string += f'''{head} -> {tail} == {weight}\n'''
return string.rstrip("""\n""" )
def _snake_case ( self ) -> str:
_UpperCAmelCase : int = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def _snake_case ( self ) -> Optional[int]:
return self.adjacency.keys()
@staticmethod
def _snake_case ( a_=None ,a_=None ) -> Tuple:
_UpperCAmelCase : List[Any] = Graph()
if vertices is None:
_UpperCAmelCase : List[str] = []
if edges is None:
_UpperCAmelCase : Optional[Any] = []
for vertex in vertices:
g.add_vertex(a_ )
for edge in edges:
g.add_edge(*a_ )
return g
class lowercase :
"""simple docstring"""
def __init__( self ) -> int:
_UpperCAmelCase : List[str] = {}
_UpperCAmelCase : int = {}
def __len__( self ) -> Tuple:
return len(self.parent )
def _snake_case ( self ,a_ ) -> str:
if item in self.parent:
return self.find(a_ )
_UpperCAmelCase : Optional[Any] = item
_UpperCAmelCase : List[Any] = 0
return item
def _snake_case ( self ,a_ ) -> List[str]:
if item not in self.parent:
return self.make_set(a_ )
if item != self.parent[item]:
_UpperCAmelCase : List[Any] = self.find(self.parent[item] )
return self.parent[item]
def _snake_case ( self ,a_ ,a_ ) -> Union[str, Any]:
_UpperCAmelCase : Any = self.find(a_ )
_UpperCAmelCase : List[str] = self.find(a_ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
_UpperCAmelCase : Any = roota
return roota
if self.rank[roota] < self.rank[roota]:
_UpperCAmelCase : Any = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
_UpperCAmelCase : List[str] = roota
return roota
return None
@staticmethod
def _snake_case ( a_ ) -> List[Any]:
_UpperCAmelCase : int = graph.num_vertices
_UpperCAmelCase : int = Graph.UnionFind()
_UpperCAmelCase : Optional[int] = []
while num_components > 1:
_UpperCAmelCase : int = {}
for vertex in graph.get_vertices():
_UpperCAmelCase : Union[str, Any] = -1
_UpperCAmelCase : Tuple = graph.get_edges()
for edge in edges:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : str = edge
edges.remove((tail, head, weight) )
for edge in edges:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Optional[Any] = edge
_UpperCAmelCase : Any = union_find.find(a_ )
_UpperCAmelCase : Any = union_find.find(a_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_UpperCAmelCase : Tuple = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_UpperCAmelCase : List[str] = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : str = cheap_edge[vertex]
if union_find.find(a_ ) != union_find.find(a_ ):
union_find.union(a_ ,a_ )
mst_edges.append(cheap_edge[vertex] )
_UpperCAmelCase : Tuple = num_components - 1
_UpperCAmelCase : Optional[int] = Graph.build(edges=a_ )
return mst
| 215 | 1 |
"""simple docstring"""
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
__A : str = logging.getLogger(__name__)
__A : List[Any] = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
__A : Tuple = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCamelCase :
lowercase : Optional[str] = field(
default=_UpperCAmelCase , metadata={
'help': (
'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'
)
} , )
lowercase : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(_UpperCAmelCase )} , )
lowercase : Optional[str] = field(
default=_UpperCAmelCase , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
lowercase : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowercase : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowercase : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowercase : bool = field(
default=_UpperCAmelCase , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
lowercase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowercase : bool = field(
default=_UpperCAmelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def a_ ( self ):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"""--config_overrides can't be used in combination with --config_name or --model_name_or_path""" )
@dataclass
class lowerCamelCase :
lowercase : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
lowercase : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
lowercase : Optional[str] = field(default=_UpperCAmelCase , metadata={'help': 'The input training data file (a text file).'} )
lowercase : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
lowercase : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'An optional input train ref data file for whole word masking in Chinese.'} , )
lowercase : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'An optional input validation ref data file for whole word masking in Chinese.'} , )
lowercase : bool = field(
default=_UpperCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowercase : Optional[int] = field(
default=5 , metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} , )
lowercase : Optional[int] = field(
default=_UpperCAmelCase , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated. Default to the max input length of the model.'
)
} , )
lowercase : Optional[int] = field(
default=_UpperCAmelCase , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
lowercase : float = field(
default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
lowercase : bool = field(
default=_UpperCAmelCase , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
def a_ ( self ):
if self.train_file is not None:
UpperCamelCase : List[Any] = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
UpperCamelCase : str = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def A_ ( snake_case_ : List[str] ,snake_case_ : Tuple ):
'''simple docstring'''
with open(snake_case_ ,"""r""" ,encoding="""utf-8""" ) as f:
UpperCamelCase : Optional[int] = [json.loads(snake_case_ ) for line in f.read().splitlines() if (len(snake_case_ ) > 0 and not line.isspace())]
assert len(snake_case_ ) == len(snake_case_ )
UpperCamelCase : Tuple = {c: dataset[c] for c in dataset.column_names}
UpperCamelCase : int = refs
return Dataset.from_dict(snake_case_ )
def A_ ( ):
'''simple docstring'''
UpperCamelCase : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase : Dict = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
UpperCamelCase : int = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase : List[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,handlers=[logging.StreamHandler(sys.stdout )] ,)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" ,snake_case_ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
UpperCamelCase : List[str] = load_dataset(data_args.dataset_name ,data_args.dataset_config_name )
if "validation" not in datasets.keys():
UpperCamelCase : List[str] = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,split=f'train[:{data_args.validation_split_percentage}%]' ,)
UpperCamelCase : List[str] = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,split=f'train[{data_args.validation_split_percentage}%:]' ,)
else:
UpperCamelCase : List[str] = {}
if data_args.train_file is not None:
UpperCamelCase : Any = data_args.train_file
if data_args.validation_file is not None:
UpperCamelCase : Dict = data_args.validation_file
UpperCamelCase : Any = data_args.train_file.split(""".""" )[-1]
if extension == "txt":
UpperCamelCase : int = """text"""
UpperCamelCase : List[Any] = load_dataset(snake_case_ ,data_files=snake_case_ )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase : Dict = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
UpperCamelCase : Tuple = AutoConfig.from_pretrained(model_args.config_name ,**snake_case_ )
elif model_args.model_name_or_path:
UpperCamelCase : Any = AutoConfig.from_pretrained(model_args.model_name_or_path ,**snake_case_ )
else:
UpperCamelCase : str = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(f'Overriding config: {model_args.config_overrides}' )
config.update_from_string(model_args.config_overrides )
logger.info(f'New config: {config}' )
UpperCamelCase : Dict = {
"""cache_dir""": model_args.cache_dir,
"""use_fast""": model_args.use_fast_tokenizer,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
UpperCamelCase : int = AutoTokenizer.from_pretrained(model_args.tokenizer_name ,**snake_case_ )
elif model_args.model_name_or_path:
UpperCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path ,**snake_case_ )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name.""" )
if model_args.model_name_or_path:
UpperCamelCase : Dict = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path ,from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) ,config=snake_case_ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
else:
logger.info("""Training new model from scratch""" )
UpperCamelCase : Dict = AutoModelForMaskedLM.from_config(snake_case_ )
model.resize_token_embeddings(len(snake_case_ ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
UpperCamelCase : Tuple = datasets["""train"""].column_names
else:
UpperCamelCase : Any = datasets["""validation"""].column_names
UpperCamelCase : str = """text""" if """text""" in column_names else column_names[0]
UpperCamelCase : Optional[Any] = """max_length""" if data_args.pad_to_max_length else False
def tokenize_function(snake_case_ : Optional[int] ):
# Remove empty lines
UpperCamelCase : Optional[Any] = [line for line in examples["""text"""] if len(snake_case_ ) > 0 and not line.isspace()]
return tokenizer(examples["""text"""] ,padding=snake_case_ ,truncation=snake_case_ ,max_length=data_args.max_seq_length )
UpperCamelCase : List[str] = datasets.map(
snake_case_ ,batched=snake_case_ ,num_proc=data_args.preprocessing_num_workers ,remove_columns=[text_column_name] ,load_from_cache_file=not data_args.overwrite_cache ,)
# Add the chinese references if provided
if data_args.train_ref_file is not None:
UpperCamelCase : List[Any] = add_chinese_references(tokenized_datasets["""train"""] ,data_args.train_ref_file )
if data_args.validation_ref_file is not None:
UpperCamelCase : Any = add_chinese_references(
tokenized_datasets["""validation"""] ,data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
UpperCamelCase : List[Any] = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
UpperCamelCase : List[str] = False
# Data collator
# This one will take care of randomly masking the tokens.
UpperCamelCase : Dict = DataCollatorForWholeWordMask(tokenizer=snake_case_ ,mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
UpperCamelCase : Any = Trainer(
model=snake_case_ ,args=snake_case_ ,train_dataset=tokenized_datasets["""train"""] if training_args.do_train else None ,eval_dataset=tokenized_datasets["""validation"""] if training_args.do_eval else None ,tokenizer=snake_case_ ,data_collator=snake_case_ ,)
# Training
if training_args.do_train:
if last_checkpoint is not None:
UpperCamelCase : List[Any] = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
UpperCamelCase : Optional[Any] = model_args.model_name_or_path
else:
UpperCamelCase : int = None
UpperCamelCase : Any = trainer.train(resume_from_checkpoint=snake_case_ )
trainer.save_model() # Saves the tokenizer too for easy upload
UpperCamelCase : List[str] = os.path.join(training_args.output_dir ,"""train_results.txt""" )
if trainer.is_world_process_zero():
with open(snake_case_ ,"""w""" ) as writer:
logger.info("""***** Train results *****""" )
for key, value in sorted(train_result.metrics.items() ):
logger.info(f' {key} = {value}' )
writer.write(f'{key} = {value}\n' )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir ,"""trainer_state.json""" ) )
# Evaluation
UpperCamelCase : str = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
UpperCamelCase : List[str] = trainer.evaluate()
UpperCamelCase : List[str] = math.exp(eval_output["""eval_loss"""] )
UpperCamelCase : Any = perplexity
UpperCamelCase : Optional[int] = os.path.join(training_args.output_dir ,"""eval_results_mlm_wwm.txt""" )
if trainer.is_world_process_zero():
with open(snake_case_ ,"""w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in sorted(results.items() ):
logger.info(f' {key} = {value}' )
writer.write(f'{key} = {value}\n' )
return results
def A_ ( snake_case_ : Dict ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 367 |
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
__A : Optional[Any] = logging.get_logger(__name__)
def A_ ( snake_case_ : np.ndarray ,snake_case_ : Union[int, Iterable[int]] ,snake_case_ : bool ,snake_case_ : int ):
'''simple docstring'''
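    # Scale the input toward output_size (optionally preserving the aspect ratio)
    # and round each side to a multiple of `multiple`.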
def constraint_to_multiple_of(snake_case_ : Optional[Any] ,snake_case_ : Optional[int] ,snake_case_ : List[str]=0 ,snake_case_ : Optional[Any]=None ):
UpperCamelCase : List[str] = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
UpperCamelCase : Optional[Any] = math.floor(val / multiple ) * multiple
if x < min_val:
UpperCamelCase : Dict = math.ceil(val / multiple ) * multiple
return x
UpperCamelCase : Any = (output_size, output_size) if isinstance(snake_case_ ,snake_case_ ) else output_size
UpperCamelCase , UpperCamelCase : int = get_image_size(snake_case_ )
UpperCamelCase , UpperCamelCase : Union[str, Any] = output_size
# determine new height and width
UpperCamelCase : List[str] = output_height / input_height
UpperCamelCase : List[str] = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
UpperCamelCase : int = scale_width
else:
# fit height
UpperCamelCase : Optional[Any] = scale_height
UpperCamelCase : int = constraint_to_multiple_of(scale_height * input_height ,multiple=snake_case_ )
UpperCamelCase : Union[str, Any] = constraint_to_multiple_of(scale_width * input_width ,multiple=snake_case_ )
return (new_height, new_width)
class lowerCamelCase ( _UpperCAmelCase ):
lowercase : str = ['pixel_values']
def __init__( self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 1 / 255 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
super().__init__(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = size if size is not None else {"""height""": 384, """width""": 384}
UpperCamelCase : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = do_resize
UpperCamelCase : Union[str, Any] = size
UpperCamelCase : Union[str, Any] = keep_aspect_ratio
UpperCamelCase : Any = ensure_multiple_of
UpperCamelCase : List[Any] = resample
UpperCamelCase : str = do_rescale
UpperCamelCase : Optional[Any] = rescale_factor
UpperCamelCase : List[str] = do_normalize
UpperCamelCase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCamelCase : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
UpperCamelCase : Tuple = get_size_dict(SCREAMING_SNAKE_CASE_ )
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
UpperCamelCase : Dict = get_resize_output_image_size(
SCREAMING_SNAKE_CASE_ , output_size=(size["""height"""], size["""width"""]) , keep_aspect_ratio=SCREAMING_SNAKE_CASE_ , multiple=SCREAMING_SNAKE_CASE_ , )
return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , ):
UpperCamelCase : Optional[int] = do_resize if do_resize is not None else self.do_resize
UpperCamelCase : List[Any] = size if size is not None else self.size
UpperCamelCase : Dict = get_size_dict(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
UpperCamelCase : Optional[int] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
UpperCamelCase : Tuple = resample if resample is not None else self.resample
UpperCamelCase : str = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase : Any = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase : Any = image_mean if image_mean is not None else self.image_mean
UpperCamelCase : List[Any] = image_std if image_std is not None else self.image_std
UpperCamelCase : str = make_list_of_images(SCREAMING_SNAKE_CASE_ )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
UpperCamelCase : Tuple = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_resize:
UpperCamelCase : Union[str, Any] = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
UpperCamelCase : int = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_normalize:
UpperCamelCase : List[str] = [self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ ) for image in images]
UpperCamelCase : Any = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images]
UpperCamelCase : Union[str, Any] = {"""pixel_values""": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
UpperCamelCase : str = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : List[Any] = target_sizes.numpy()
UpperCamelCase : Dict = []
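            # Upsample each logit map to its target size, then take the per-pixel argmax over classes.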
for idx in range(len(SCREAMING_SNAKE_CASE_ ) ):
UpperCamelCase : List[Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase : List[Any] = logits.argmax(dim=1 )
UpperCamelCase : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 27 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _snake_case ( unittest.TestCase ):
@property
def lowerCamelCase__ ( self : Optional[int] ):
torch.manual_seed(0 )
__lowerCamelCase : Optional[int] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
@property
def lowerCamelCase__ ( self : List[str] ):
torch.manual_seed(0 )
__lowerCamelCase : List[Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , )
return model
@property
def lowerCamelCase__ ( self : List[Any] ):
torch.manual_seed(0 )
__lowerCamelCase : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(UpperCAmelCase )
def lowerCamelCase__ ( self : str ):
__lowerCamelCase : Union[str, Any] = self.dummy_uncond_unet
__lowerCamelCase : Any = DDIMScheduler()
__lowerCamelCase : int = self.dummy_vq_model
__lowerCamelCase : Dict = LDMPipeline(unet=UpperCAmelCase , vqvae=UpperCAmelCase , scheduler=UpperCAmelCase )
ldm.to(UpperCAmelCase )
ldm.set_progress_bar_config(disable=UpperCAmelCase )
__lowerCamelCase : List[Any] = torch.manual_seed(0 )
__lowerCamelCase : Optional[Any] = ldm(generator=UpperCAmelCase , num_inference_steps=2 , output_type="numpy" ).images
__lowerCamelCase : Tuple = torch.manual_seed(0 )
__lowerCamelCase : List[str] = ldm(generator=UpperCAmelCase , num_inference_steps=2 , output_type="numpy" , return_dict=UpperCAmelCase )[0]
__lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
__lowerCamelCase : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase : Tuple = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
__lowerCamelCase : Optional[int] = 1E-2 if torch_device != "mps" else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class _snake_case ( unittest.TestCase ):
def lowerCamelCase__ ( self : Any ):
__lowerCamelCase : List[str] = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256" )
ldm.to(UpperCAmelCase )
ldm.set_progress_bar_config(disable=UpperCAmelCase )
__lowerCamelCase : Tuple = torch.manual_seed(0 )
__lowerCamelCase : str = ldm(generator=UpperCAmelCase , num_inference_steps=5 , output_type="numpy" ).images
__lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
__lowerCamelCase : Any = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
__lowerCamelCase : Optional[int] = 1E-2 if torch_device != "mps" else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance | 135 | """simple docstring"""
def lowercase_ ( _lowerCamelCase: Dict ) -> List[str]:
'''simple docstring'''
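    # Trial division: if n = p1**a1 * p2**a2 * ... * pk**ak, the number of
    # divisors of n is (a1 + 1) * (a2 + 1) * ... * (ak + 1).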
__lowerCamelCase : Tuple = 1
__lowerCamelCase : int = 2
while i * i <= n:
__lowerCamelCase : Tuple = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def lowercase_ ( ) -> str:
'''simple docstring'''
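    # Walk the triangular numbers 1, 3, 6, 10, ... and return the first one with
    # more than 500 divisors (Project Euler problem 12).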
__lowerCamelCase : List[str] = 1
__lowerCamelCase : Dict = 1
while True:
i += 1
t_num += i
if count_divisors(_lowerCamelCase ) > 500:
break
return t_num
if __name__ == "__main__":
    print(solution())
 | 135 | 1 |
"""simple docstring"""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class UpperCamelCase__( __A ):
lowerCAmelCase__ : int = DistilBertTokenizer
lowerCAmelCase__ : int = DistilBertTokenizerFast
lowerCAmelCase__ : List[Any] = True
@slow
def snake_case__ ( self ) -> Optional[Any]:
A__ = DistilBertTokenizer.from_pretrained('distilbert-base-uncased' )
A__ = tokenizer.encode('sequence builders' ,add_special_tokens=__UpperCAmelCase )
A__ = tokenizer.encode('multi-sequence build' ,add_special_tokens=__UpperCAmelCase )
A__ = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase )
A__ = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase ,__UpperCAmelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
 | 352 |
"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class UpperCamelCase__( unittest.TestCase ):
def snake_case__ ( self ) -> Optional[int]:
A__ = inspect.getfile(accelerate.test_utils )
A__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
A__ = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def snake_case__ ( self ) -> int:
A__ = f'''
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
'''.split()
A__ = [sys.executable] + distributed_args
execute_subprocess_async(__UpperCAmelCase ,env=os.environ.copy() )
| 154 | 0 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE ) as metadata_file:
lowerCAmelCase : Tuple = json.load(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[Any] = LukeConfig(use_entity_aware_attention=SCREAMING_SNAKE_CASE , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
lowerCAmelCase : Optional[int] = torch.load(SCREAMING_SNAKE_CASE , map_location="cpu" )["module"]
# Load the entity vocab file
lowerCAmelCase : List[str] = load_original_entity_vocab(SCREAMING_SNAKE_CASE )
# add an entry for [MASK2]
lowerCAmelCase : Dict = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
lowerCAmelCase : List[Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
lowerCAmelCase : Union[str, Any] = AddedToken("<ent>" , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = AddedToken("<ent2>" , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
with open(os.path.join(SCREAMING_SNAKE_CASE , "tokenizer_config.json" ) , "r" ) as f:
lowerCAmelCase : int = json.load(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[int] = "MLukeTokenizer"
with open(os.path.join(SCREAMING_SNAKE_CASE , "tokenizer_config.json" ) , "w" ) as f:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
with open(os.path.join(SCREAMING_SNAKE_CASE , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[Any] = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
# Initialize the embeddings of the special tokens
lowerCAmelCase : Union[str, Any] = tokenizer.convert_tokens_to_ids(["@"] )[0]
lowerCAmelCase : Tuple = tokenizer.convert_tokens_to_ids(["#"] )[0]
lowerCAmelCase : Optional[int] = state_dict["embeddings.word_embeddings.weight"]
lowerCAmelCase : str = word_emb[ent_init_index].unsqueeze(0 )
lowerCAmelCase : int = word_emb[enta_init_index].unsqueeze(0 )
lowerCAmelCase : Optional[Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
lowerCAmelCase : Optional[Any] = state_dict[bias_name]
lowerCAmelCase : List[str] = decoder_bias[ent_init_index].unsqueeze(0 )
lowerCAmelCase : Tuple = decoder_bias[enta_init_index].unsqueeze(0 )
lowerCAmelCase : Tuple = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
lowerCAmelCase : int = f"""encoder.layer.{layer_index}.attention.self."""
lowerCAmelCase : List[str] = state_dict[prefix + matrix_name]
lowerCAmelCase : Optional[int] = state_dict[prefix + matrix_name]
lowerCAmelCase : Dict = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
lowerCAmelCase : Any = state_dict["entity_embeddings.entity_embeddings.weight"]
lowerCAmelCase : List[str] = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
lowerCAmelCase : Optional[int] = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
lowerCAmelCase : Optional[Any] = state_dict["entity_predictions.bias"]
lowerCAmelCase : Any = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
lowerCAmelCase : List[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
lowerCAmelCase : Dict = LukeForMaskedLM(config=SCREAMING_SNAKE_CASE ).eval()
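    # The decoder weights/bias are tied to the embedding weights, so drop them from
    # the state dict here and let tie_weights() restore the sharing after loading.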
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
lowerCAmelCase : str = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
lowerCAmelCase : str = state_dict[key]
else:
lowerCAmelCase : int = state_dict[key]
lowerCAmelCase , lowerCAmelCase : Optional[int] = model.load_state_dict(SCREAMING_SNAKE_CASE , strict=SCREAMING_SNAKE_CASE )
if set(SCREAMING_SNAKE_CASE ) != {"luke.embeddings.position_ids"}:
raise ValueError(f"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(SCREAMING_SNAKE_CASE ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
lowerCAmelCase : str = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE , task="entity_classification" )
lowerCAmelCase : str = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
lowerCAmelCase : Dict = (0, 9)
lowerCAmelCase : Optional[int] = tokenizer(SCREAMING_SNAKE_CASE , entity_spans=[span] , return_tensors="pt" )
lowerCAmelCase : Optional[Any] = model(**SCREAMING_SNAKE_CASE )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
lowerCAmelCase : Tuple = torch.Size((1, 3_3, 7_6_8) )
lowerCAmelCase : Dict = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
lowerCAmelCase : Tuple = torch.Size((1, 1, 7_6_8) )
lowerCAmelCase : Optional[int] = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
f""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
lowerCAmelCase : Tuple = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = "Tokyo is the capital of <mask>."
lowerCAmelCase : Optional[int] = (2_4, 3_0)
lowerCAmelCase : Any = tokenizer(SCREAMING_SNAKE_CASE , entity_spans=[span] , return_tensors="pt" )
lowerCAmelCase : Any = model(**SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = encoding["input_ids"][0].tolist()
lowerCAmelCase : str = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
lowerCAmelCase : Tuple = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Any = outputs.entity_logits[0][0].argmax().item()
lowerCAmelCase : List[str] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(SCREAMING_SNAKE_CASE ) )
model.save_pretrained(SCREAMING_SNAKE_CASE )
def a__ ( SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
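    # Each line of the original vocab file is a JSON record; map every
    # "language:entity" name (and the bare special tokens) to its entity id.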
lowerCAmelCase : Optional[Any] = ["[MASK]", "[PAD]", "[UNK]"]
lowerCAmelCase : Tuple = [json.loads(SCREAMING_SNAKE_CASE ) for line in open(SCREAMING_SNAKE_CASE )]
lowerCAmelCase : Any = {}
for entry in data:
lowerCAmelCase : Tuple = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
lowerCAmelCase : Tuple = entity_id
break
lowerCAmelCase : Tuple = f"""{language}:{entity_name}"""
lowerCAmelCase : Tuple = entity_id
return new_mapping
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 108 |
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
UpperCamelCase_ , UpperCamelCase_ = coefficient_matrix.shape
UpperCamelCase_ , UpperCamelCase_ = constant_matrix.shape
if rowsa != colsa:
UpperCamelCase_ = f"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"""
raise ValueError(_lowerCAmelCase)
if colsa != 1:
UpperCamelCase_ = f"""Constant matrix must be nx1 but received {rowsa}x{colsa}"""
raise ValueError(_lowerCAmelCase)
if rowsa != rowsa:
UpperCamelCase_ = (
"Coefficient and constant matrices dimensions must be nxn and nx1 but "
f"""received {rowsa}x{colsa} and {rowsa}x{colsa}"""
)
raise ValueError(_lowerCAmelCase)
if len(_lowerCAmelCase) != rowsa:
UpperCamelCase_ = (
"Number of initial values must be equal to number of rows in coefficient "
f"""matrix but received {len(_lowerCAmelCase)} and {rowsa}"""
)
raise ValueError(_lowerCAmelCase)
if iterations <= 0:
raise ValueError("Iterations must be at least 1")
UpperCamelCase_ = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1)
UpperCamelCase_ , UpperCamelCase_ = table.shape
strictly_diagonally_dominant(_lowerCAmelCase)
    # Iterate over the whole matrix for the given number of iterations
for _ in range(_lowerCAmelCase):
UpperCamelCase_ = []
for row in range(_lowerCAmelCase):
UpperCamelCase_ = 0
for col in range(_lowerCAmelCase):
if col == row:
UpperCamelCase_ = table[row][col]
elif col == cols - 1:
UpperCamelCase_ = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
UpperCamelCase_ = (temp + val) / denom
new_val.append(_lowerCAmelCase)
UpperCamelCase_ = new_val
return [float(_lowerCAmelCase) for i in new_val]
def _lowerCAmelCase (_lowerCAmelCase):
UpperCamelCase_ , UpperCamelCase_ = table.shape
UpperCamelCase_ = True
for i in range(0 , _lowerCAmelCase):
UpperCamelCase_ = 0
for j in range(0 , cols - 1):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("Coefficient matrix is not strictly diagonally dominant")
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 128 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( __a , unittest.TestCase ):
snake_case : str = KandinskyVaaPriorPipeline
snake_case : Union[str, Any] = ["""prompt"""]
snake_case : Optional[int] = ["""prompt""", """negative_prompt"""]
snake_case : int = [
"""num_images_per_prompt""",
"""generator""",
"""num_inference_steps""",
"""latents""",
"""negative_prompt""",
"""guidance_scale""",
"""output_type""",
"""return_dict""",
]
snake_case : Tuple = False
@property
def snake_case_ (self ):
return 3_2
@property
def snake_case_ (self ):
return 3_2
@property
def snake_case_ (self ):
return self.time_input_dim
@property
def snake_case_ (self ):
return self.time_input_dim * 4
@property
def snake_case_ (self ):
return 1_0_0
@property
def snake_case_ (self ):
_UpperCAmelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def snake_case_ (self ):
torch.manual_seed(0 )
_UpperCAmelCase : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(lowerCAmelCase__ )
@property
def snake_case_ (self ):
torch.manual_seed(0 )
_UpperCAmelCase : List[str] = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 1_2,
"""embedding_dim""": self.text_embedder_hidden_size,
"""num_layers""": 1,
}
_UpperCAmelCase : List[str] = PriorTransformer(**lowerCAmelCase__ )
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0; set clip_std to 1 so it does not
_UpperCAmelCase : Tuple = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def snake_case_ (self ):
torch.manual_seed(0 )
_UpperCAmelCase : Tuple = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , )
_UpperCAmelCase : str = CLIPVisionModelWithProjection(lowerCAmelCase__ )
return model
@property
def snake_case_ (self ):
_UpperCAmelCase : List[Any] = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=lowerCAmelCase__ , do_normalize=lowerCAmelCase__ , do_resize=lowerCAmelCase__ , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=2_2_4 , )
return image_processor
def snake_case_ (self ):
_UpperCAmelCase : Dict = self.dummy_prior
_UpperCAmelCase : int = self.dummy_image_encoder
_UpperCAmelCase : Optional[int] = self.dummy_text_encoder
_UpperCAmelCase : str = self.dummy_tokenizer
_UpperCAmelCase : List[Any] = self.dummy_image_processor
_UpperCAmelCase : Dict = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1_0_0_0 , clip_sample=lowerCAmelCase__ , clip_sample_range=1_0.0 , )
_UpperCAmelCase : Any = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""scheduler""": scheduler,
"""image_processor""": image_processor,
}
return components
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__=0 ):
if str(lowerCAmelCase__ ).startswith("""mps""" ):
_UpperCAmelCase : List[str] = torch.manual_seed(lowerCAmelCase__ )
else:
_UpperCAmelCase : Dict = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
_UpperCAmelCase : int = {
"""prompt""": """horse""",
"""generator""": generator,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def snake_case_ (self ):
_UpperCAmelCase : str = """cpu"""
_UpperCAmelCase : Tuple = self.get_dummy_components()
_UpperCAmelCase : Optional[Any] = self.pipeline_class(**lowerCAmelCase__ )
_UpperCAmelCase : str = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = pipe(**self.get_dummy_inputs(lowerCAmelCase__ ) )
_UpperCAmelCase : List[Any] = output.image_embeds
_UpperCAmelCase : List[str] = pipe(
**self.get_dummy_inputs(lowerCAmelCase__ ) , return_dict=lowerCAmelCase__ , )[0]
_UpperCAmelCase : List[str] = image[0, -1_0:]
_UpperCAmelCase : Union[str, Any] = image_from_tuple[0, -1_0:]
assert image.shape == (1, 3_2)
_UpperCAmelCase : str = np.array(
[-0.0_5_3_2, 1.7_1_2_0, 0.3_6_5_6, -1.0_8_5_2, -0.8_9_4_6, -1.1_7_5_6, 0.4_3_4_8, 0.2_4_8_2, 0.5_1_4_6, -0.1_1_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def snake_case_ (self ):
_UpperCAmelCase : Dict = torch_device == """cpu"""
_UpperCAmelCase : Union[str, Any] = True
_UpperCAmelCase : int = False
self._test_inference_batch_single_identical(
test_max_difference=lowerCAmelCase__ , relax_max_difference=lowerCAmelCase__ , test_mean_pixel_difference=lowerCAmelCase__ , )
@skip_mps
def snake_case_ (self ):
_UpperCAmelCase : Union[str, Any] = torch_device == """cpu"""
_UpperCAmelCase : List[str] = False
self._test_attention_slicing_forward_pass(
test_max_difference=lowerCAmelCase__ , test_mean_pixel_difference=lowerCAmelCase__ , )
| 359 |
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , ):
if config_name_or_path is None:
_UpperCAmelCase : List[Any] = """facebook/rag-token-base""" if model_type == """rag_token""" else """facebook/rag-sequence-base"""
if generator_tokenizer_name_or_path is None:
_UpperCAmelCase : str = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
_UpperCAmelCase : Optional[int] = question_encoder_name_or_path
_UpperCAmelCase : Tuple = RagTokenForGeneration if model_type == """rag_token""" else RagSequenceForGeneration
# Save model.
_UpperCAmelCase : List[Any] = RagConfig.from_pretrained(lowerCAmelCase_ )
_UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(lowerCAmelCase_ )
_UpperCAmelCase : List[str] = AutoConfig.from_pretrained(lowerCAmelCase_ )
_UpperCAmelCase : Dict = gen_config
_UpperCAmelCase : int = question_encoder_config
_UpperCAmelCase : Optional[Any] = model_class.from_pretrained_question_encoder_generator(
lowerCAmelCase_ , lowerCAmelCase_ , config=lowerCAmelCase_ )
rag_model.save_pretrained(lowerCAmelCase_ )
# Sanity check.
model_class.from_pretrained(lowerCAmelCase_ )
# Save tokenizers.
_UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
gen_tokenizer.save_pretrained(dest_dir / """generator_tokenizer/""" )
_UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
question_encoder_tokenizer.save_pretrained(dest_dir / """question_encoder_tokenizer/""" )
if __name__ == "__main__":
lowerCAmelCase_ : str = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
lowerCAmelCase_ : List[Any] = parser.parse_args()
lowerCAmelCase_ : Tuple = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 170 | 0 |
'''simple docstring'''
import string
import numpy
def __UpperCamelCase ( lowercase__ : int, lowercase__ : int ):
'''simple docstring'''
return b if a == 0 else greatest_common_divisor(b % a, lowercase__ )
class lowerCAmelCase :
lowerCAmelCase_ = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
lowerCAmelCase_ = numpy.vectorize(lambda A : x % 3_6 )
lowerCAmelCase_ = numpy.vectorize(A )
def __init__( self : int , __lowercase : numpy.ndarray ):
"""simple docstring"""
        __lowercase =self.modulus(__lowercase ) # mod 36 calculations on the encryption key
self.check_determinant() # validate the determinant of the encryption key
__lowercase =encrypt_key.shape[0]
def snake_case ( self : Any , __lowercase : str ):
"""simple docstring"""
return self.key_string.index(__lowercase )
def snake_case ( self : Any , __lowercase : int ):
"""simple docstring"""
return self.key_string[round(__lowercase )]
def snake_case ( self : str ):
"""simple docstring"""
__lowercase =round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
__lowercase =det % len(self.key_string )
__lowercase =len(self.key_string )
if greatest_common_divisor(__lowercase , len(self.key_string ) ) != 1:
__lowercase =(
                f'''determinant modulo {req_l} of the encryption key ({det}) '''
                f'''is not coprime with {req_l}.\nTry another key.'''
)
raise ValueError(__lowercase )
def snake_case ( self : Tuple , __lowercase : str ):
"""simple docstring"""
__lowercase =[char for char in text.upper() if char in self.key_string]
__lowercase =chars[-1]
while len(__lowercase ) % self.break_key != 0:
chars.append(__lowercase )
return "".join(__lowercase )
def snake_case ( self : List[Any] , __lowercase : str ):
"""simple docstring"""
__lowercase =self.process_text(text.upper() )
__lowercase =''
for i in range(0 , len(__lowercase ) - self.break_key + 1 , self.break_key ):
__lowercase =text[i : i + self.break_key]
__lowercase =[self.replace_letters(__lowercase ) for char in batch]
__lowercase =numpy.array([vec] ).T
__lowercase =self.modulus(self.encrypt_key.dot(__lowercase ) ).T.tolist()[
0
]
__lowercase =''.join(
self.replace_digits(__lowercase ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def snake_case ( self : Any ):
"""simple docstring"""
__lowercase =round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
__lowercase =det % len(self.key_string )
__lowercase =None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
__lowercase =i
break
__lowercase =(
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(__lowercase ) )
def snake_case ( self : List[Any] , __lowercase : str ):
"""simple docstring"""
__lowercase =self.make_decrypt_key()
__lowercase =self.process_text(text.upper() )
__lowercase =''
for i in range(0 , len(__lowercase ) - self.break_key + 1 , self.break_key ):
__lowercase =text[i : i + self.break_key]
__lowercase =[self.replace_letters(__lowercase ) for char in batch]
__lowercase =numpy.array([vec] ).T
__lowercase =self.modulus(decrypt_key.dot(__lowercase ) ).T.tolist()[0]
__lowercase =''.join(
self.replace_digits(__lowercase ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def __UpperCamelCase ( ):
'''simple docstring'''
__lowercase =int(input('Enter the order of the encryption key: ' ) )
__lowercase =[]
print('Enter each row of the encryption key with space separated integers' )
for _ in range(lowercase__ ):
__lowercase =[int(lowercase__ ) for x in input().split()]
hill_matrix.append(lowercase__ )
__lowercase =HillCipher(numpy.array(lowercase__ ) )
print('Would you like to encrypt or decrypt some text? (1 or 2)' )
__lowercase =input('\n1. Encrypt\n2. Decrypt\n' )
if option == "1":
__lowercase =input('What text would you like to encrypt?: ' )
print('Your encrypted text is:' )
print(hc.encrypt(lowercase__ ) )
elif option == "2":
__lowercase =input('What text would you like to decrypt?: ' )
print('Your decrypted text is:' )
print(hc.decrypt(lowercase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 141 |
'''simple docstring'''
def __UpperCamelCase ( lowercase__ : str, lowercase__ : bool = False ):
'''simple docstring'''
if not isinstance(lowercase__, lowercase__ ):
__lowercase =F'''Expected string as input, found {type(lowercase__ )}'''
raise ValueError(lowercase__ )
if not isinstance(lowercase__, lowercase__ ):
__lowercase =F'''Expected boolean as use_pascal parameter, found {type(lowercase__ )}'''
raise ValueError(lowercase__ )
__lowercase =input_str.split('_' )
__lowercase =0 if use_pascal else 1
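    # PascalCase capitalizes every word; camelCase leaves the first word unchanged.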
__lowercase =words[start_index:]
__lowercase =[word[0].upper() + word[1:] for word in words_to_capitalize]
__lowercase ='' if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 141 | 1 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class __SCREAMING_SNAKE_CASE :
_UpperCAmelCase : List[Any] = BlenderbotSmallConfig
_UpperCAmelCase : Union[str, Any] = {}
_UpperCAmelCase : Tuple = "gelu"
def __init__( self : List[str] , A : List[str] , A : Optional[Any]=1_3 , A : Any=7 , A : Optional[Any]=True , A : List[Any]=False , A : Tuple=9_9 , A : List[str]=3_2 , A : Tuple=2 , A : Tuple=4 , A : int=3_7 , A : Dict=0.1 , A : Optional[Any]=0.1 , A : str=2_0 , A : Optional[int]=2 , A : int=1 , A : Tuple=0 , ) ->str:
lowerCamelCase__ : Dict = parent
lowerCamelCase__ : Union[str, Any] = batch_size
lowerCamelCase__ : Dict = seq_length
lowerCamelCase__ : int = is_training
lowerCamelCase__ : Optional[Any] = use_labels
lowerCamelCase__ : List[str] = vocab_size
lowerCamelCase__ : int = hidden_size
lowerCamelCase__ : Union[str, Any] = num_hidden_layers
lowerCamelCase__ : Tuple = num_attention_heads
lowerCamelCase__ : str = intermediate_size
lowerCamelCase__ : Dict = hidden_dropout_prob
lowerCamelCase__ : List[str] = attention_probs_dropout_prob
lowerCamelCase__ : int = max_position_embeddings
lowerCamelCase__ : List[Any] = eos_token_id
lowerCamelCase__ : Any = pad_token_id
lowerCamelCase__ : Dict = bos_token_id
def __lowerCamelCase ( self : Optional[Any] ) ->Union[str, Any]:
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowerCamelCase__ : Optional[int] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowerCamelCase__ : List[str] = tf.concat([input_ids, eos_tensor] , axis=1 )
lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ : int = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowerCamelCase__ : Any = prepare_blenderbot_small_inputs_dict(A , A , A )
return config, inputs_dict
def __lowerCamelCase ( self : Tuple , A : Union[str, Any] , A : List[Any] ) ->str:
lowerCamelCase__ : int = TFBlenderbotSmallModel(config=A ).get_decoder()
lowerCamelCase__ : Dict = inputs_dict['''input_ids''']
lowerCamelCase__ : int = input_ids[:1, :]
lowerCamelCase__ : Union[str, Any] = inputs_dict['''attention_mask'''][:1, :]
lowerCamelCase__ : List[Any] = inputs_dict['''head_mask''']
lowerCamelCase__ : Optional[int] = 1
# first forward pass
lowerCamelCase__ : Tuple = model(A , attention_mask=A , head_mask=A , use_cache=A )
lowerCamelCase__ : Optional[Any] = outputs.to_tuple()
        # create hypothetical next tokens and extend next_input_ids
lowerCamelCase__ : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCamelCase__ : Tuple = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention_mask
lowerCamelCase__ : Any = tf.concat([input_ids, next_tokens] , axis=-1 )
lowerCamelCase__ : Optional[int] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowerCamelCase__ : List[Any] = model(A , attention_mask=A )[0]
lowerCamelCase__ : Optional[Any] = model(A , attention_mask=A , past_key_values=A )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowerCamelCase__ : List[str] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowerCamelCase__ : str = output_from_no_past[:, -3:, random_slice_idx]
lowerCamelCase__ : Optional[int] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(A , A , rtol=1e-3 )
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , ) -> int:
"""simple docstring"""
if attention_mask is None:
lowerCamelCase__ : List[str] = tf.cast(tf.math.not_equal(UpperCAmelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
lowerCamelCase__ : List[Any] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
lowerCamelCase__ : Any = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowerCamelCase__ : Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowerCamelCase__ : Any = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ,lowerCAmelCase_ ,unittest.TestCase ):
_UpperCAmelCase : Tuple = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
_UpperCAmelCase : List[str] = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
_UpperCAmelCase : int = (
{
"conversational": TFBlenderbotSmallForConditionalGeneration,
"feature-extraction": TFBlenderbotSmallModel,
"summarization": TFBlenderbotSmallForConditionalGeneration,
"text2text-generation": TFBlenderbotSmallForConditionalGeneration,
"translation": TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
_UpperCAmelCase : Optional[int] = True
_UpperCAmelCase : Tuple = False
_UpperCAmelCase : int = False
def __lowerCamelCase ( self : Optional[int] ) ->Optional[int]:
lowerCamelCase__ : Any = TFBlenderbotSmallModelTester(self )
lowerCamelCase__ : Any = ConfigTester(self , config_class=A )
def __lowerCamelCase ( self : int ) ->Any:
self.config_tester.run_common_tests()
def __lowerCamelCase ( self : List[str] ) ->Optional[Any]:
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*A )
@require_tokenizers
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
_UpperCAmelCase : Dict = [
"Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
" i'm going to throw up.\nand why is that?"
]
_UpperCAmelCase : str = "facebook/blenderbot_small-90M"
@cached_property
def __lowerCamelCase ( self : int ) ->Optional[int]:
# use "old" tokenizer here because of bug when downloading new tokenizer
return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
@cached_property
def __lowerCamelCase ( self : str ) ->Union[str, Any]:
lowerCamelCase__ : Dict = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def __lowerCamelCase ( self : str ) ->Optional[Any]:
lowerCamelCase__ : Any = self.tokenizer(self.src_text , return_tensors='''tf''' )
lowerCamelCase__ : List[str] = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=A , )
lowerCamelCase__ : List[str] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=A )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 363 |
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
_A : List[Any] = 'base_with_context'
def _a ( UpperCAmelCase , UpperCAmelCase ) -> Tuple:
"""simple docstring"""
lowerCamelCase__ : Dict = nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) )
lowerCamelCase__ : List[str] = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=UpperCAmelCase )
for lyr_num, lyr in enumerate(model.encoders ):
lowerCamelCase__ : Any = weights[f"layers_{lyr_num}"]
lowerCamelCase__ : List[Any] = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
lowerCamelCase__ : int = ly_weight['''attention''']
lowerCamelCase__ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
lowerCamelCase__ : Tuple = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
lowerCamelCase__ : str = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
lowerCamelCase__ : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
lowerCamelCase__ : str = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
lowerCamelCase__ : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
lowerCamelCase__ : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
lowerCamelCase__ : Any = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
lowerCamelCase__ : Optional[Any] = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def _a ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) )
lowerCamelCase__ : Union[str, Any] = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=UpperCAmelCase )
for lyr_num, lyr in enumerate(model.encoders ):
lowerCamelCase__ : Tuple = weights[f"layers_{lyr_num}"]
lowerCamelCase__ : str = ly_weight['''attention''']
lowerCamelCase__ : Any = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
lowerCamelCase__ : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
lowerCamelCase__ : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
lowerCamelCase__ : int = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
lowerCamelCase__ : Tuple = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
lowerCamelCase__ : Tuple = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
lowerCamelCase__ : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
lowerCamelCase__ : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
lowerCamelCase__ : Dict = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
lowerCamelCase__ : Any = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def _a ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase__ : Tuple = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) )
lowerCamelCase__ : int = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) )
lowerCamelCase__ : str = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=UpperCAmelCase )
lowerCamelCase__ : Tuple = nn.Parameter(
torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
lowerCamelCase__ : List[Any] = weights[f"layers_{lyr_num}"]
lowerCamelCase__ : Optional[Any] = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) )
lowerCamelCase__ : Any = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) )
lowerCamelCase__ : Optional[Any] = ly_weight['''self_attention''']
lowerCamelCase__ : Dict = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
lowerCamelCase__ : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
lowerCamelCase__ : Any = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
lowerCamelCase__ : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
lowerCamelCase__ : Dict = ly_weight['''MultiHeadDotProductAttention_0''']
lowerCamelCase__ : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
lowerCamelCase__ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
lowerCamelCase__ : Any = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
lowerCamelCase__ : str = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
lowerCamelCase__ : int = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) )
lowerCamelCase__ : str = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
lowerCamelCase__ : Any = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) )
lowerCamelCase__ : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
lowerCamelCase__ : int = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
lowerCamelCase__ : List[str] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
lowerCamelCase__ : Any = nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) )
lowerCamelCase__ : Tuple = nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) )
return model
def main(args):
    """Convert a T5X Music Spectrogram Diffusion checkpoint into a diffusers pipeline."""
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    ta_checkpoint = jnp.tree_util.tree_map(onp.array, ta_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"],
        vocab_size=synth_model.model.module.config.vocab_size,
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_context_length=synth_model.sequence_length["targets_context"],
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    decoder = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_length=synth_model.sequence_length["targets_context"],
        max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time,
        d_model=synth_model.model.module.config.emb_dim,
        num_layers=synth_model.model.module.config.num_decoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(ta_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(ta_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder,
        continuous_encoder=continuous_encoder,
        decoder=decoder,
        scheduler=scheduler,
        melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument(
        "--checkpoint_path",
        default=f"{MODEL}/checkpoint_500000",
        type=str,
        required=False,
        help="Path to the original jax model checkpoint.",
    )
    args = parser.parse_args()
    main(args)
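# Example invocation (the script filename and paths below are illustrative
# assumptions, not from the source; the default checkpoint path comes from the
# `MODEL` constant defined earlier in this script):
#
#   python convert_music_spectrogram_to_diffusers.py \
#       --checkpoint_path /path/to/checkpoint_500000 \
#       --output_path ./spectrogram_diffusion \
#       --save True
#
# Note that `type=bool` on --save means any non-empty string (even "False")
# parses as True; pass --save "" to disable saving.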
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. it includes the space
        # before it, so we set lstrip=True.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
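# A minimal usage sketch (the checkpoint name matches the map above; the sample
# sentence is made up for illustration):
#
#   tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
#   ids = tokenizer("Hello, how are you?").input_ids
#   assert ids[-1] == tokenizer.eos_token_id  # Blenderbot appends </s> only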
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs
    ):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)
        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)
        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
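# A minimal construction sketch (the sub-configs below use their default
# values; the composition call mirrors the classmethod above):
#
#   text_config = AltCLIPTextConfig()
#   vision_config = AltCLIPVisionConfig()
#   config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config)
#   assert config.text_config.project_dim == 768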
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6, metadata={"help": "Percentage of patches to mask."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None, metadata={"help": "Stride to use for the encoder."}
    )
class MaskGenerator:
    # Generates a boolean mask over model patches: 1 means "masked". Masking is
    # drawn at the coarser `mask_patch_size` granularity, then upsampled to the
    # model's `model_patch_size` grid.
    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
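# A quick sketch of the generator's output (numbers follow the defaults above:
# 192/32 = 6 coarse cells per side, scale 32/4 = 8, so 48*48 = 2304 mask tokens;
# ceil(36 * 0.6) = 22 coarse cells are masked, each covering 8*8 tokens):
#
#   mask_generator = MaskGenerator()
#   mask = mask_generator()
#   assert mask.shape == (2304,)
#   assert int(mask.sum()) == 22 * 64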
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"

    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )
    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        # Apply the transforms and attach a freshly sampled mask per image.
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]

        return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
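# Example invocation (the script name and flag values below are illustrative
# assumptions; TrainingArguments supplies the standard --do_train/--do_eval
# flags):
#
#   python run_mim.py \
#       --model_type vit \
#       --dataset_name cifar10 \
#       --output_dir ./simmim-pretrained \
#       --do_train --do_eval \
#       --overwrite_output_dir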
"""simple docstring"""
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
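    # Illustrative check (numbers chosen for this sketch, not from the source):
    # 30 days of simple interest on a 10_000 principal at a 0.05% daily rate.
    print(simple_interest(10_000, 0.0005, 30))  # ~150.0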
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained("google/canine-s")

    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        tokenizer.model_max_length = 1024
        return tokenizer
@require_torch
    def test_prepare_batch_integration(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)
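    # Reading the expected ids above: 57344 (U+E000) and 57345 (U+E001) are
    # CANINE's private-use [CLS] and [SEP] code points, the values in between
    # are plain `ord()` code points ("L" == 76, "i" == 105, ...), and the
    # trailing zeros are padding.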
@require_torch
    def test_encoding_keys(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertIn("token_type_ids", batch)
@require_torch
    def test_max_length_integration(self):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weater?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt"
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"

                additional_special_tokens = tokenizer.additional_special_tokens
                # We can add a new special token for Canine as follows:
                new_additional_special_token = chr(0xE007)
                additional_special_tokens.append(new_additional_special_token)
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn(new_additional_special_token, after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, ids = self.get_clean_sequence(tokenizer)

                # a special token for Canine can be defined as follows:
                special_token = chr(0xE005)

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False)
                encoded = tokenizer.encode(text, add_special_tokens=False)

                input_encoded = tokenizer.encode(input_text, add_special_tokens=False)
                special_token_id = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(encoded, input_encoded + special_token_id)

                decoded = tokenizer.decode(encoded, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_tokenize_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                SPECIAL_TOKEN_1 = chr(0xE005)
                SPECIAL_TOKEN_2 = chr(0xE006)

                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]})

                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)

                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)
    @require_tokenizers
    def test_added_token_serializable(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # a special token for Canine can be defined as follows:
                new_token = chr(0xE006)
                new_token = AddedToken(new_token, lstrip=True)
                tokenizer.add_special_tokens({"additional_special_tokens": [new_token]})

                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(tmp_dir_name)
                    tokenizer.from_pretrained(tmp_dir_name)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                # a special token for Canine can be defined as follows:
                new_token_1 = chr(0xE006)

                special_tokens_map["additional_special_tokens"] = [new_token_1]
                tokenizer_config["additional_special_tokens"] = [new_token_1]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir, extra_ids=0)
                self.assertIn(new_token_1, tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_1],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_1])
                    ),
                )

                new_token_2 = chr(0xE007)
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = [AddedToken(new_token_2, lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, extra_ids=0
                )

                self.assertIn(new_token_2, tokenizer.additional_special_tokens)
                # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_2], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_2]))
                )
    @require_tokenizers
    def test_encode_decode_with_spaces(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input = "hello world"
                if self.space_between_special_tokens:
                    output = "[CLS] hello world [SEP]"
                else:
                    output = input
                encoded = tokenizer.encode(input, add_special_tokens=False)
                decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
                self.assertIn(decoded, [output, output.lower()])
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token = "a"
                token_id = ord(token)

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id)
                    self.assertEqual(getattr(tokenizer, attr), token)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                additional_special_token_id = 0xE006
                additional_special_token = chr(additional_special_token_id)
                setattr(tokenizer, "additional_special_tokens_ids", [additional_special_token_id])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [additional_special_token])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [additional_special_token_id])
    # CANINE's vocabulary is the fixed set of Unicode code points, so the common
    # add-tokens test does not apply.
    def test_add_tokens_tokenizer(self):
        pass

    # CanineTokenizer does not support do_lower_case=True, as each character has
    # its own Unicode code point ("b" and "B", for example, differ).
    def test_added_tokens_do_lower_case(self):
        pass

    # CanineModel does not support the get_input_embeddings nor the get_vocab method
    def test_np_encode_plus_sent_to_model(self):
        pass

    # CanineModel does not support the get_input_embeddings nor the get_vocab method
    def test_torch_encode_plus_sent_to_model(self):
        pass

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist, so unnecessary to test
    def test_conversion_reversible(self):
        pass
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
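# A quick illustration of what `_re_checkpoint` extracts (the docstring line is
# made up for this sketch; real config docstrings follow the same pattern):
#
#   >>> _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#   [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]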
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens(self):
        # Not implemented for LayoutLM.
        pass
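# The suite can be run in isolation (the path below assumes the usual
# transformers repository layout):
#
#   pytest tests/models/layoutlm/test_tokenization_layoutlm.py -q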
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}
    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"
    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()

        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
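    # Why the device split above: device-specific `torch.Generator` seeding is
    # not supported on MPS, so it falls back to the global seed. A seeded
    # generator makes the stochastic VAE sampling reproducible, e.g.:
    #
    #   g = torch.Generator(device="cpu").manual_seed(0)
    #   a = torch.randn(2, generator=g)
    #   g = torch.Generator(device="cpu").manual_seed(0)
    #   b = torch.randn(2, generator=g)
    #   assert torch.equal(a, b)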
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase : Any , lowerCAmelCase : List[str] , lowerCAmelCase : Any ) -> str:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = self.get_sd_vae_model()
__lowerCAmelCase : Optional[int] = self.get_sd_image(lowerCAmelCase )
with torch.no_grad():
__lowerCAmelCase : List[Any] = model(lowerCAmelCase ).sample
assert sample.shape == image.shape
__lowerCAmelCase : Union[str, Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__lowerCAmelCase : str = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(lowerCAmelCase , lowerCAmelCase , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : int ) -> str:
"""simple docstring"""
__lowerCAmelCase : Dict = self.get_sd_vae_model()
__lowerCAmelCase : Optional[Any] = self.get_sd_image(lowerCAmelCase , shape=(3, 4, 64, 64) )
with torch.no_grad():
__lowerCAmelCase : Optional[Any] = model.decode(lowerCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
__lowerCAmelCase : Union[str, Any] = sample[-1, -2:, :2, -2:].flatten().cpu()
__lowerCAmelCase : Tuple = torch.tensor(lowerCAmelCase )
assert torch_all_close(lowerCAmelCase , lowerCAmelCase , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
def SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Tuple = self.get_sd_vae_model(fpaa=lowerCAmelCase )
__lowerCAmelCase : str = self.get_sd_image(lowerCAmelCase , shape=(3, 4, 64, 64) , fpaa=lowerCAmelCase )
with torch.no_grad():
__lowerCAmelCase : Dict = model.decode(lowerCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
__lowerCAmelCase : Any = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__lowerCAmelCase : Union[str, Any] = torch.tensor(lowerCAmelCase )
assert torch_all_close(lowerCAmelCase , lowerCAmelCase , atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase : Any ) -> Any:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.get_sd_vae_model(fpaa=lowerCAmelCase )
__lowerCAmelCase : Union[str, Any] = self.get_sd_image(lowerCAmelCase , shape=(3, 4, 64, 64) , fpaa=lowerCAmelCase )
with torch.no_grad():
__lowerCAmelCase : Union[str, Any] = model.decode(lowerCAmelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__lowerCAmelCase : int = model.decode(lowerCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
assert torch_all_close(lowerCAmelCase , lowerCAmelCase , atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" )
def SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase : List[Any] ) -> Dict:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = self.get_sd_vae_model()
__lowerCAmelCase : Optional[Any] = self.get_sd_image(lowerCAmelCase , shape=(3, 4, 64, 64) )
with torch.no_grad():
__lowerCAmelCase : Optional[Any] = model.decode(lowerCAmelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__lowerCAmelCase : Tuple = model.decode(lowerCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
assert torch_all_close(lowerCAmelCase , lowerCAmelCase , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
def SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase : int , lowerCAmelCase : List[Any] ) -> Any:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = self.get_sd_vae_model()
__lowerCAmelCase : List[str] = self.get_sd_image(lowerCAmelCase )
__lowerCAmelCase : Any = self.get_generator(lowerCAmelCase )
with torch.no_grad():
__lowerCAmelCase : Optional[int] = model.encode(lowerCAmelCase ).latent_dist
__lowerCAmelCase : Union[str, Any] = dist.sample(generator=lowerCAmelCase )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
__lowerCAmelCase : Any = sample[0, -1, -3:, -3:].flatten().cpu()
__lowerCAmelCase : int = torch.tensor(lowerCAmelCase )
__lowerCAmelCase : str = 3e-3 if torch_device != """mps""" else 1e-2
assert torch_all_close(lowerCAmelCase , lowerCAmelCase , atol=lowerCAmelCase )
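# Illustrative sketch, not part of the tests above; only AutoencoderKL, the
# checkpoint name and the "vae" subfolder are taken from this file, the rest is
# an assumption:
# vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae").eval()
# latents = vae.encode(images).latent_dist.sample()  # images: a (N, 3, H, W) tensor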
| 139 | 1 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
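# Tests for UniPCMultistepScheduler: a config save/load round-trip must
# reproduce step outputs to within 1e-5 (given identical past residuals), and
# full sampling loops are checked against reference output means.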
class _a ( _lowercase):
_a : Union[str, Any] = (UniPCMultistepScheduler,)
_a : Any = (('''num_inference_steps''', 25),)
def UpperCAmelCase__( self : Dict , **_SCREAMING_SNAKE_CASE : Any )-> str:
lowerCAmelCase__ : List[Any] = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''solver_type''': '''bh2''',
}
config.update(**_SCREAMING_SNAKE_CASE )
return config
def UpperCAmelCase__( self : int , _SCREAMING_SNAKE_CASE : Dict=0 , **_SCREAMING_SNAKE_CASE : int )-> List[Any]:
lowerCAmelCase__ : Union[str, Any] = dict(self.forward_default_kwargs )
lowerCAmelCase__ : List[str] = kwargs.pop('''num_inference_steps''' , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Any = self.dummy_sample
lowerCAmelCase__ : Optional[int] = 0.1 * sample
lowerCAmelCase__ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase__ : Tuple = self.get_scheduler_config(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : int = scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
lowerCAmelCase__ : str = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Tuple = scheduler_class.from_pretrained(_SCREAMING_SNAKE_CASE )
new_scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
lowerCAmelCase__ : Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = sample, sample
for t in range(_SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ):
lowerCAmelCase__ : str = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
lowerCAmelCase__ : Any = new_scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase__( self : str , _SCREAMING_SNAKE_CASE : Dict=0 , **_SCREAMING_SNAKE_CASE : List[str] )-> Dict:
lowerCAmelCase__ : Any = dict(self.forward_default_kwargs )
lowerCAmelCase__ : str = kwargs.pop('''num_inference_steps''' , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : List[str] = self.dummy_sample
lowerCAmelCase__ : Optional[Any] = 0.1 * sample
lowerCAmelCase__ : int = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase__ : Dict = self.get_scheduler_config()
lowerCAmelCase__ : List[str] = scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals (must be after setting timesteps)
lowerCAmelCase__ : int = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Union[str, Any] = scheduler_class.from_pretrained(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
new_scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residual (must be after setting timesteps)
lowerCAmelCase__ : Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCAmelCase__ : List[Any] = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
lowerCAmelCase__ : List[Any] = new_scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : str=None , **_SCREAMING_SNAKE_CASE : Dict )-> int:
if scheduler is None:
lowerCAmelCase__ : Union[str, Any] = self.scheduler_classes[0]
lowerCAmelCase__ : List[Any] = self.get_scheduler_config(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Tuple = scheduler_class(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Optional[int] = self.scheduler_classes[0]
lowerCAmelCase__ : Dict = self.get_scheduler_config(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : List[Any] = scheduler_class(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Dict = 10
lowerCAmelCase__ : List[Any] = self.dummy_model()
lowerCAmelCase__ : List[str] = self.dummy_sample_deter
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : List[Any] = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Optional[Any] = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).prev_sample
return sample
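    # The helper above runs a full 10-step denoising loop with the dummy model
    # and is reused by the numerical regression checks below.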
def UpperCAmelCase__( self : List[str] )-> Dict:
lowerCAmelCase__ : Optional[Any] = dict(self.forward_default_kwargs )
lowerCAmelCase__ : str = kwargs.pop('''num_inference_steps''' , _SCREAMING_SNAKE_CASE )
for scheduler_class in self.scheduler_classes:
lowerCAmelCase__ : Optional[Any] = self.get_scheduler_config()
lowerCAmelCase__ : List[str] = scheduler_class(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Union[str, Any] = self.dummy_sample
lowerCAmelCase__ : Optional[int] = 0.1 * sample
if num_inference_steps is not None and hasattr(_SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
elif num_inference_steps is not None and not hasattr(_SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
lowerCAmelCase__ : int = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowerCAmelCase__ : Any = [residual + 0.2, residual + 0.15, residual + 0.10]
lowerCAmelCase__ : str = dummy_past_residuals[: scheduler.config.solver_order]
lowerCAmelCase__ : str = scheduler.timesteps[5]
lowerCAmelCase__ : str = scheduler.timesteps[6]
lowerCAmelCase__ : Any = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
lowerCAmelCase__ : Dict = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase__( self : Any )-> int:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
lowerCAmelCase__ : int = UniPCMultistepScheduler(**self.get_scheduler_config() )
lowerCAmelCase__ : List[Any] = self.full_loop(scheduler=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : List[Any] = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
lowerCAmelCase__ : List[Any] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
lowerCAmelCase__ : str = DEISMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase__ : Tuple = DPMSolverMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase__ : List[str] = UniPCMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase__ : str = self.full_loop(scheduler=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Tuple = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
def UpperCAmelCase__( self : List[Any] )-> List[Any]:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : Dict )-> Optional[int]:
self.check_over_configs(thresholding=_SCREAMING_SNAKE_CASE )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_SCREAMING_SNAKE_CASE , prediction_type=_SCREAMING_SNAKE_CASE , sample_max_value=_SCREAMING_SNAKE_CASE , solver_order=_SCREAMING_SNAKE_CASE , solver_type=_SCREAMING_SNAKE_CASE , )
def UpperCAmelCase__( self : str )-> Any:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : Any )-> Dict:
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_SCREAMING_SNAKE_CASE , solver_type=_SCREAMING_SNAKE_CASE , prediction_type=_SCREAMING_SNAKE_CASE , )
lowerCAmelCase__ : Tuple = self.full_loop(
solver_order=_SCREAMING_SNAKE_CASE , solver_type=_SCREAMING_SNAKE_CASE , prediction_type=_SCREAMING_SNAKE_CASE , )
assert not torch.isnan(_SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers"
def UpperCAmelCase__( self : Optional[int] )-> Tuple:
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
def UpperCAmelCase__( self : Optional[Any] )-> Tuple:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_SCREAMING_SNAKE_CASE , time_step=0 )
def UpperCAmelCase__( self : int )-> Any:
lowerCAmelCase__ : str = self.full_loop()
lowerCAmelCase__ : Union[str, Any] = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
def UpperCAmelCase__( self : Union[str, Any] )-> Optional[int]:
lowerCAmelCase__ : int = self.full_loop(prediction_type='''v_prediction''' )
lowerCAmelCase__ : Any = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.1014 ) < 1E-3
def UpperCAmelCase__( self : List[str] )-> Optional[int]:
lowerCAmelCase__ : Optional[Any] = self.scheduler_classes[0]
lowerCAmelCase__ : str = self.get_scheduler_config(thresholding=_SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 )
lowerCAmelCase__ : Dict = scheduler_class(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : str = 10
lowerCAmelCase__ : Dict = self.dummy_model()
lowerCAmelCase__ : Any = self.dummy_sample_deter.half()
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : str = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : List[str] = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).prev_sample
        assert sample.dtype == torch.float16
def UpperCAmelCase__( self : Any , **_SCREAMING_SNAKE_CASE : Union[str, Any] )-> List[Any]:
for scheduler_class in self.scheduler_classes:
lowerCAmelCase__ : str = self.get_scheduler_config(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Any = scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
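# Illustrative sketch, not part of the tests above (config keys taken from the
# defaults earlier in this file):
# scheduler = UniPCMultistepScheduler(num_train_timesteps=1000, solver_order=2, solver_type="bh2")
# scheduler.set_timesteps(25)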
| 131 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
lowerCamelCase = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
lowerCamelCase = {
'''facebook/blenderbot_small-90M''': 512,
}
class _a ( _lowercase):
_a : Dict = VOCAB_FILES_NAMES
_a : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_a : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : Dict = BlenderbotSmallTokenizer
def __init__( self : Tuple , _SCREAMING_SNAKE_CASE : int=None , _SCREAMING_SNAKE_CASE : Any=None , _SCREAMING_SNAKE_CASE : Tuple="<|endoftext|>" , _SCREAMING_SNAKE_CASE : Any="<|endoftext|>" , _SCREAMING_SNAKE_CASE : Union[str, Any]="<|endoftext|>" , _SCREAMING_SNAKE_CASE : Tuple=False , _SCREAMING_SNAKE_CASE : List[Any]=True , **_SCREAMING_SNAKE_CASE : Optional[Any] , )-> Union[str, Any]:
super().__init__(
ByteLevelBPETokenizer(
vocab=_SCREAMING_SNAKE_CASE , merges=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE , trim_offsets=_SCREAMING_SNAKE_CASE , ) , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
lowerCAmelCase__ : List[str] = add_prefix_space
def UpperCAmelCase__( self : Tuple , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Any=None )-> Optional[int]:
lowerCAmelCase__ : str = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCAmelCase__( self : int , _SCREAMING_SNAKE_CASE : List[int] , _SCREAMING_SNAKE_CASE : Optional[List[int]] = None )-> List[int]:
lowerCAmelCase__ : List[str] = [self.sep_token_id]
lowerCAmelCase__ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
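# Illustrative sketch, not part of the original file. The fast tokenizer above
# is defined under a mangled class name; BlenderbotSmallTokenizerFast is an
# assumed upstream name based on the slow tokenizer it wraps:
# tok = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
# ids = tok("sam is a great name.").input_ids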
| 131 | 1 |
'''simple docstring'''
def harmonic_series ( n_term ):
    '''simple docstring'''
    if n_term == "":
        return []
    series = []
    for temp in range(int(n_term ) ):
        series.append(f"1/{temp + 1}" if series else """1""" )
    return series
if __name__ == "__main__":
    nth_term = input('Enter the last number (nth term) of the Harmonic Series')
print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
print(harmonic_series(nth_term))
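# Example: harmonic_series(3) returns ['1', '1/2', '1/3'].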
| 364 |
'''simple docstring'''
def solution ( max_base = 10 ,max_power = 22 ):
    '''simple docstring'''
    bases = range(1 ,max_base )
    powers = range(1 ,max_power )
    return sum(
        1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f'''{solution(1_0, 2_2) = }''')
| 243 | 0 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE :Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :Any = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = "mvp"
snake_case_ = ["past_key_values"]
snake_case_ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : str ,A : Optional[Any]=5_02_67 ,A : int=10_24 ,A : List[Any]=12 ,A : Any=40_96 ,A : Dict=16 ,A : Any=12 ,A : Optional[int]=40_96 ,A : Optional[int]=16 ,A : List[Any]=0.0 ,A : List[Any]=0.0 ,A : Optional[Any]="gelu" ,A : int=10_24 ,A : int=0.1 ,A : Tuple=0.0 ,A : Optional[Any]=0.0 ,A : Optional[Any]=0.02 ,A : str=0.0 ,A : Any=False ,A : Optional[Any]=True ,A : str=1 ,A : Optional[Any]=0 ,A : Optional[Any]=2 ,A : List[Any]=True ,A : int=2 ,A : str=2 ,A : List[Any]=False ,A : str=1_00 ,A : Any=8_00 ,**A : str ,):
__A = vocab_size
__A = max_position_embeddings
__A = d_model
__A = encoder_ffn_dim
__A = encoder_layers
__A = encoder_attention_heads
__A = decoder_ffn_dim
__A = decoder_layers
__A = decoder_attention_heads
__A = dropout
__A = attention_dropout
__A = activation_dropout
__A = activation_function
__A = init_std
__A = encoder_layerdrop
__A = decoder_layerdrop
__A = classifier_dropout
__A = use_cache
__A = encoder_layers
__A = scale_embedding # scale factor will be sqrt(d_model) if True
__A = use_prompt
__A = prompt_length
__A = prompt_mid_dim
super().__init__(
pad_token_id=A ,bos_token_id=A ,eos_token_id=A ,is_encoder_decoder=A ,decoder_start_token_id=A ,forced_eos_token_id=A ,**A ,)
if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" ,A ):
__A = self.bos_token_id
warnings.warn(
f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
"The config can simply be saved and uploaded again to be fixed." )
| 15 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE :Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :Optional[int] = {'vocab_file': 'sentencepiece.bpe.model'}
SCREAMING_SNAKE_CASE :Tuple = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
}
}
SCREAMING_SNAKE_CASE :List[Any] = {
'camembert-base': 512,
}
SCREAMING_SNAKE_CASE :List[str] = '▁'
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["input_ids", "attention_mask"]
def __init__( self : Optional[Any] ,A : List[str] ,A : List[Any]="<s>" ,A : Tuple="</s>" ,A : Any="</s>" ,A : Optional[Any]="<s>" ,A : Tuple="<unk>" ,A : str="<pad>" ,A : int="<mask>" ,A : Optional[int]=["<s>NOTUSED", "</s>NOTUSED"] ,A : Optional[Dict[str, Any]] = None ,**A : Optional[Any] ,):
# Mask token behave like a normal word, i.e. include the space before it
__A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else mask_token
__A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A ,eos_token=A ,unk_token=A ,sep_token=A ,cls_token=A ,pad_token=A ,mask_token=A ,additional_special_tokens=A ,sp_model_kwargs=self.sp_model_kwargs ,**A ,)
__A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A ) )
__A = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>
__A = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
__A = len(self.fairseq_tokens_to_ids )
__A = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
__A = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def UpperCamelCase_ ( self : int ,A : List[int] ,A : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__A = [self.cls_token_id]
__A = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase_ ( self : Dict ,A : List[int] ,A : Optional[List[int]] = None ,A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A ,token_ids_a=A ,already_has_special_tokens=A )
if token_ids_a is None:
return [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1]
def UpperCamelCase_ ( self : Union[str, Any] ,A : List[int] ,A : Optional[List[int]] = None ):
__A = [self.sep_token_id]
__A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCamelCase_ ( self : Dict ):
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def UpperCamelCase_ ( self : int ):
__A = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase_ ( self : Any ,A : str ):
return self.sp_model.encode(A ,out_type=A )
def UpperCamelCase_ ( self : List[str] ,A : Dict ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(A ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(A )
def UpperCamelCase_ ( self : Dict ,A : Tuple ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCamelCase_ ( self : Optional[Any] ,A : Dict ):
__A = []
__A = ""
__A = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(A ) + token
__A = True
__A = []
else:
current_sub_tokens.append(A )
__A = False
out_string += self.sp_model.decode(A )
return out_string.strip()
def __getstate__( self : Dict ):
__A = self.__dict__.copy()
__A = None
return state
def __setstate__( self : Union[str, Any] ,A : Any ):
__A = d
# for backward compatibility
if not hasattr(self ,"sp_model_kwargs" ):
__A = {}
__A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase_ ( self : Any ,A : str ,A : Optional[str] = None ):
if not os.path.isdir(A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__A = os.path.join(
A ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,A )
elif not os.path.isfile(self.vocab_file ):
with open(A ,"wb" ) as fi:
__A = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
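# Illustrative sketch, not part of the original file. CamembertTokenizer is the
# assumed upstream name for the class above; the checkpoint comes from the
# pretrained map:
# tok = CamembertTokenizer.from_pretrained("camembert-base")
# pieces = tok.tokenize("J'aime le camembert !")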
| 15 | 1 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a ( _lowerCAmelCase ):
A = '''ClapFeatureExtractor'''
A = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict:
super().__init__(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def __call__(self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_ ) -> Dict:
UpperCAmelCase_: int = kwargs.pop("""sampling_rate""", SCREAMING_SNAKE_CASE_ )
if text is None and audios is None:
raise ValueError("""You have to specify either text or audios. Both cannot be none.""" )
if text is not None:
UpperCAmelCase_: Optional[Any] = self.tokenizer(SCREAMING_SNAKE_CASE_, return_tensors=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
if audios is not None:
UpperCAmelCase_: Union[str, Any] = self.feature_extractor(
SCREAMING_SNAKE_CASE_, sampling_rate=SCREAMING_SNAKE_CASE_, return_tensors=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
if text is not None and audios is not None:
UpperCAmelCase_: Optional[int] = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE_ ), tensor_type=SCREAMING_SNAKE_CASE_ )
def __snake_case (self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Dict:
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def __snake_case (self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Any:
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
@property
def __snake_case (self ) -> List[Any]:
UpperCAmelCase_: Optional[int] = self.tokenizer.model_input_names
UpperCAmelCase_: Tuple = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
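# Illustrative sketch, not part of the original file. ClapProcessor is the
# assumed upstream name for the class above, and the checkpoint name is also an
# assumption; audio_array stands for any 1-D waveform:
# processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
# inputs = processor(text=["a sound of a cat"], audios=audio_array, sampling_rate=48_000, return_tensors="pt")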
| 82 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
a : List[str] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a : str = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
a : Dict = {
'google/electra-small-generator': 512,
'google/electra-base-generator': 512,
'google/electra-large-generator': 512,
'google/electra-small-discriminator': 512,
'google/electra-base-discriminator': 512,
'google/electra-large-discriminator': 512,
}
a : Optional[Any] = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class _a ( _lowerCAmelCase ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_INIT_CONFIGURATION
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = ElectraTokenizer
def __init__(self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_="[UNK]", SCREAMING_SNAKE_CASE_="[SEP]", SCREAMING_SNAKE_CASE_="[PAD]", SCREAMING_SNAKE_CASE_="[CLS]", SCREAMING_SNAKE_CASE_="[MASK]", SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_, ) -> Optional[int]:
super().__init__(
SCREAMING_SNAKE_CASE_, tokenizer_file=SCREAMING_SNAKE_CASE_, do_lower_case=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, mask_token=SCREAMING_SNAKE_CASE_, tokenize_chinese_chars=SCREAMING_SNAKE_CASE_, strip_accents=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
UpperCAmelCase_: List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""", SCREAMING_SNAKE_CASE_ ) != do_lower_case
or normalizer_state.get("""strip_accents""", SCREAMING_SNAKE_CASE_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""", SCREAMING_SNAKE_CASE_ ) != tokenize_chinese_chars
):
UpperCAmelCase_: Optional[int] = getattr(SCREAMING_SNAKE_CASE_, normalizer_state.pop("""type""" ) )
UpperCAmelCase_: Union[str, Any] = do_lower_case
UpperCAmelCase_: Dict = strip_accents
UpperCAmelCase_: List[Any] = tokenize_chinese_chars
UpperCAmelCase_: int = normalizer_class(**SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = do_lower_case
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=None ) -> Optional[Any]:
UpperCAmelCase_: Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
UpperCAmelCase_: Optional[int] = [self.sep_token_id]
UpperCAmelCase_: Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
UpperCAmelCase_: Tuple = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_, name=SCREAMING_SNAKE_CASE_ )
return tuple(SCREAMING_SNAKE_CASE_ )
| 82 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
__UpperCAmelCase = logging.get_logger(__name__)
@dataclass
class lowerCamelCase :
'''simple docstring'''
def __init__( self , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=6.0 , _UpperCamelCase=None , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=None , _UpperCamelCase="fp4" , _UpperCamelCase=False , **_UpperCamelCase , ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = load_in_abit
UpperCAmelCase_ : Any = load_in_abit
UpperCAmelCase_ : List[Any] = llm_inta_threshold
UpperCAmelCase_ : Tuple = llm_inta_skip_modules
UpperCAmelCase_ : Tuple = llm_inta_enable_fpaa_cpu_offload
UpperCAmelCase_ : Optional[Any] = llm_inta_has_fpaa_weight
UpperCAmelCase_ : Union[str, Any] = bnb_abit_quant_type
UpperCAmelCase_ : Dict = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
UpperCAmelCase_ : int = torch.floataa
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : str = getattr(_UpperCamelCase , _UpperCamelCase )
elif isinstance(_UpperCamelCase , torch.dtype ):
UpperCAmelCase_ : Optional[Any] = bnb_abit_compute_dtype
else:
raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype' )
self.post_init()
def __UpperCAmelCase ( self ) -> int:
if not isinstance(self.llm_inta_threshold , _UpperCamelCase ):
raise ValueError('llm_int8_threshold must be a float' )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , _UpperCamelCase ):
raise ValueError('llm_int8_skip_modules must be a list of strings' )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , _UpperCamelCase ):
raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean' )
if not isinstance(self.llm_inta_has_fpaa_weight , _UpperCamelCase ):
raise ValueError('llm_int8_has_fp16_weight must be a boolean' )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError('bnb_4bit_compute_dtype must be torch.dtype' )
if not isinstance(self.bnb_abit_quant_type , _UpperCamelCase ):
raise ValueError('bnb_4bit_quant_type must be a string' )
if not isinstance(self.bnb_abit_use_double_quant , _UpperCamelCase ):
raise ValueError('bnb_4bit_use_double_quant must be a boolean' )
if self.load_in_abit and not version.parse(importlib.metadata.version('bitsandbytes' ) ) >= version.parse(
'0.39.0' ):
raise ValueError(
'4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version' )
def __UpperCAmelCase ( self ) -> str:
return self.load_in_abit or self.load_in_abit
def __UpperCAmelCase ( self ) -> List[str]:
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def __UpperCAmelCase ( cls , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ) -> Tuple:
UpperCAmelCase_ : str = cls(**_UpperCamelCase )
UpperCAmelCase_ : Dict = []
for key, value in kwargs.items():
if hasattr(_UpperCamelCase , _UpperCamelCase ):
setattr(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
to_remove.append(_UpperCamelCase )
for key in to_remove:
kwargs.pop(_UpperCamelCase , _UpperCamelCase )
if return_unused_kwargs:
return config, kwargs
else:
return config
def __UpperCAmelCase ( self , _UpperCamelCase ) -> int:
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as writer:
UpperCAmelCase_ : Union[str, Any] = self.to_dict()
UpperCAmelCase_ : Optional[Any] = json.dumps(_UpperCamelCase , indent=2 , sort_keys=_UpperCamelCase ) + '\n'
writer.write(_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Dict[str, Any]:
UpperCAmelCase_ : Optional[Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ : Any = str(output['bnb_4bit_compute_dtype'] ).split('.' )[1]
return output
def __repr__( self ) -> Optional[Any]:
return f"{self.__class__.__name__} {self.to_json_string()}"
def __UpperCAmelCase ( self , _UpperCamelCase = True ) -> str:
if use_diff is True:
UpperCAmelCase_ : Tuple = self.to_diff_dict()
else:
UpperCAmelCase_ : Dict = self.to_dict()
return json.dumps(_UpperCamelCase , indent=2 , sort_keys=_UpperCamelCase ) + "\n"
def __UpperCAmelCase ( self ) -> Dict[str, Any]:
UpperCAmelCase_ : str = self.to_dict()
# get the default config dict
UpperCAmelCase_ : Optional[Any] = BitsAndBytesConfig().to_dict()
UpperCAmelCase_ : Optional[int] = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
UpperCAmelCase_ : List[str] = value
return serializable_config_dict
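# Illustrative sketch, not part of the original file. The digit-mangled names
# above (load_in_abit, bnb_abit_*) appear to stand for the public 4-bit/8-bit
# options, which this assumes:
# cfg = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype="float16")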
| 29 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase_ : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase_ : Any = {
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class __A ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = "conditional_detr"
__lowerCAmelCase = ["past_key_values"]
__lowerCAmelCase = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , __A=True , __A=None , __A=3 , __A=300 , __A=6 , __A=2048 , __A=8 , __A=6 , __A=2048 , __A=8 , __A=0.0 , __A=0.0 , __A=True , __A="relu" , __A=256 , __A=0.1 , __A=0.0 , __A=0.0 , __A=0.02 , __A=1.0 , __A=False , __A="sine" , __A="resnet50" , __A=True , __A=False , __A=2 , __A=5 , __A=2 , __A=1 , __A=1 , __A=2 , __A=5 , __A=2 , __A=0.25 , **__A , ) -> List[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
a =CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(__A , __A ):
a =backbone_config.get('''model_type''' )
a =CONFIG_MAPPING[backbone_model_type]
a =config_class.from_dict(__A )
a =use_timm_backbone
a =backbone_config
a =num_channels
a =num_queries
a =d_model
a =encoder_ffn_dim
a =encoder_layers
a =encoder_attention_heads
a =decoder_ffn_dim
a =decoder_layers
a =decoder_attention_heads
a =dropout
a =attention_dropout
a =activation_dropout
a =activation_function
a =init_std
a =init_xavier_std
a =encoder_layerdrop
a =decoder_layerdrop
a =encoder_layers
a =auxiliary_loss
a =position_embedding_type
a =backbone
a =use_pretrained_backbone
a =dilation
# Hungarian matcher
a =class_cost
a =bbox_cost
a =giou_cost
# Loss coefficients
a =mask_loss_coefficient
a =dice_loss_coefficient
a =cls_loss_coefficient
a =bbox_loss_coefficient
a =giou_loss_coefficient
a =focal_alpha
super().__init__(is_encoder_decoder=__A , **__A )
@property
def SCREAMING_SNAKE_CASE ( self ) -> int:
return self.encoder_attention_heads
@property
def SCREAMING_SNAKE_CASE ( self ) -> int:
return self.d_model
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a =copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
a =self.backbone_config.to_dict()
a =self.__class__.model_type
return output
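# The ONNX config below declares dynamic batch/size axes for pixel_values and
# pixel_mask, and exposes the 1e-5 tolerance used when validating exported
# outputs.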
class __A ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = version.parse("1.11" )
@property
def SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def SCREAMING_SNAKE_CASE ( self ) -> float:
return 1E-5
@property
def SCREAMING_SNAKE_CASE ( self ) -> int:
        return 12
| 81 | 0 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCamelCase ( self ):
__a : Optional[int] = FlaxXLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
__a : Any = AutoTokenizer.from_pretrained('''xlm-roberta-base''' )
__a : Tuple = '''The dog is cute and lives in the garden house'''
__a : Any = jnp.array([tokenizer.encode(_UpperCAmelCase )] )
__a : Tuple = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
__a : Optional[int] = jnp.array(
[[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] )
__a : Optional[Any] = model(_UpperCAmelCase )['''last_hidden_state''']
self.assertEqual(output.shape , _UpperCAmelCase )
# compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1] , _UpperCAmelCase , atol=1e-3 ) )
| 188 |
"""simple docstring"""
import os
import string
import sys
A = 1 << 8
A = {
'''tab''': ord('''\t'''),
'''newline''': ord('''\r'''),
'''esc''': 27,
'''up''': 65 + ARROW_KEY_FLAG,
'''down''': 66 + ARROW_KEY_FLAG,
'''right''': 67 + ARROW_KEY_FLAG,
'''left''': 68 + ARROW_KEY_FLAG,
'''mod_int''': 91,
'''undefined''': sys.maxsize,
'''interrupt''': 3,
'''insert''': 50,
'''delete''': 51,
'''pg_up''': 53,
'''pg_down''': 54,
}
KEYMAP['''arrow_begin'''] = KEYMAP['''up''']
KEYMAP['''arrow_end'''] = KEYMAP['''left''']
if sys.platform == "win32":
    WIN_CH_BUFFER = []
A = {
B'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
B'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
}
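    # On Windows, extended keys arrive as two-byte sequences prefixed with
    # \x00 or \xe0; the table above maps those sequences to arrow codes with
    # ARROW_KEY_FLAG removed (the flag is re-added once the key is decoded).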
for i in range(10):
        KEYMAP[str(i)] = ord(str(i))
def get_raw_chars ( ) -> str:
if os.name == "nt":
import msvcrt
__a : Optional[Any] = '''mbcs'''
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(a_) == 0:
# Read the keystroke
__a : Optional[Any] = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
__a : Optional[Any] = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
__a : Union[str, Any] = chr(WIN_KEYMAP[cha])
WIN_CH_BUFFER.append(chr(KEYMAP['''mod_int''']))
WIN_CH_BUFFER.append(a_)
if ord(a_) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(1_26))
__a : str = chr(KEYMAP['''esc'''])
except KeyError:
__a : str = cha[1]
else:
__a : Optional[Any] = ch.decode(a_)
else:
__a : Union[str, Any] = WIN_CH_BUFFER.pop(0)
elif os.name == "posix":
import termios
import tty
__a : Any = sys.stdin.fileno()
__a : List[str] = termios.tcgetattr(a_)
try:
tty.setraw(a_)
__a : int = sys.stdin.read(1)
finally:
termios.tcsetattr(a_ , termios.TCSADRAIN , a_)
return ch
def get_character ( ) -> str:
__a : Any = get_raw_chars()
if ord(a_) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(a_) == KEYMAP["esc"]:
__a : str = get_raw_chars()
if ord(a_) == KEYMAP["mod_int"]:
__a : List[str] = get_raw_chars()
if ord(a_) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(a_) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(a_) + ARROW_KEY_FLAG)
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"] | 188 | 1 |
'''simple docstring'''
lowercase__ : Dict = [
(10_00, 'M'),
(9_00, 'CM'),
(5_00, 'D'),
(4_00, 'CD'),
(1_00, 'C'),
(90, 'XC'),
(50, 'L'),
(40, 'XL'),
(10, 'X'),
(9, 'IX'),
(5, 'V'),
(4, 'IV'),
(1, 'I'),
]
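# ROMAN is ordered from largest value to smallest, so the greedy conversion
# below always emits the canonical numeral.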
def roman_to_int ( roman : str ) -> int:
    """simple docstring"""
    vals = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1000}
    total = 0
    place = 0
    while place < len(roman ):
        if (place + 1 < len(roman )) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def int_to_roman ( number : int ) -> str:
    """simple docstring"""
    result = []
    for arabic, roman in ROMAN:
        (factor , number) = divmod(number ,arabic )
        result.append(roman * factor )
        if number == 0:
            break
    return "".join(result )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 324 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
lowercase__ : str = None
lowercase__ : Optional[int] = logging.get_logger(__name__)
lowercase__ : Optional[Any] = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'}
lowercase__ : int = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
'tokenizer_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json',
},
}
lowercase__ : Optional[int] = {
'google/rembert': 2_56,
}
lowercase__ : str = '▁'
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : str = VOCAB_FILES_NAMES
_snake_case : str = PRETRAINED_VOCAB_FILES_MAP
_snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Dict = RemBertTokenizer
def __init__( self : List[Any] , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : str=None , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : str=True , lowerCAmelCase__ : Union[str, Any]=False , lowerCAmelCase__ : List[Any]="[CLS]" , lowerCAmelCase__ : str="[SEP]" , lowerCAmelCase__ : Optional[Any]="<unk>" , lowerCAmelCase__ : Optional[int]="[SEP]" , lowerCAmelCase__ : List[str]="<pad>" , lowerCAmelCase__ : str="[CLS]" , lowerCAmelCase__ : List[Any]="[MASK]" , **lowerCAmelCase__ : List[Any] , ) -> Any:
'''simple docstring'''
_UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , remove_space=lowerCAmelCase__ , keep_accents=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , **lowerCAmelCase__ , )
_UpperCamelCase = do_lower_case
_UpperCamelCase = remove_space
_UpperCamelCase = keep_accents
_UpperCamelCase = vocab_file
_UpperCamelCase = False if not self.vocab_file else True
def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_UpperCamelCase = [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def snake_case__ ( self : int , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
def snake_case__ ( self : List[str] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_UpperCamelCase = [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case__ ( self : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase__ ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(lowerCAmelCase__ ) )
return
_UpperCamelCase = os.path.join(
lowerCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ):
copyfile(self.vocab_file , lowerCAmelCase__ )
return (out_vocab_file,)
| 324 | 1 |
def greatest_common_divisor (x: int , y: int ):
    """simple docstring"""
    return x if y == 0 else greatest_common_divisor(y , x % y )
def lcm (x: int , y: int ):
    """simple docstring"""
    return (x * y) // greatest_common_divisor(x , y )
def solution (n: int = 2_0 ):
    """simple docstring"""
    g = 1
    for i in range(1 , n + 1 ):
        g = lcm(g , i )
    return g
if __name__ == "__main__":
print(F'''{solution() = }''')
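# With the default n = 20 this is Project Euler problem 5: the smallest number
# evenly divisible by every integer from 1 to 20, i.e. 232_792_560.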
| 82 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
a : Tuple = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class _a ( unittest.TestCase ):
A = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
A = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
A = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
A = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict:
UpperCAmelCase_: Dict = ZeroShotClassificationPipeline(
model=SCREAMING_SNAKE_CASE_, tokenizer=SCREAMING_SNAKE_CASE_, candidate_labels=["""polics""", """health"""] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict:
UpperCAmelCase_: Dict = classifier("""Who are you voting for in 2020?""", candidate_labels="""politics""" )
self.assertEqual(SCREAMING_SNAKE_CASE_, {"""sequence""": ANY(SCREAMING_SNAKE_CASE_ ), """labels""": [ANY(SCREAMING_SNAKE_CASE_ )], """scores""": [ANY(SCREAMING_SNAKE_CASE_ )]} )
# No kwarg
UpperCAmelCase_: Optional[int] = classifier("""Who are you voting for in 2020?""", ["""politics"""] )
self.assertEqual(SCREAMING_SNAKE_CASE_, {"""sequence""": ANY(SCREAMING_SNAKE_CASE_ ), """labels""": [ANY(SCREAMING_SNAKE_CASE_ )], """scores""": [ANY(SCREAMING_SNAKE_CASE_ )]} )
UpperCAmelCase_: Optional[int] = classifier("""Who are you voting for in 2020?""", candidate_labels=["""politics"""] )
self.assertEqual(SCREAMING_SNAKE_CASE_, {"""sequence""": ANY(SCREAMING_SNAKE_CASE_ ), """labels""": [ANY(SCREAMING_SNAKE_CASE_ )], """scores""": [ANY(SCREAMING_SNAKE_CASE_ )]} )
UpperCAmelCase_: List[Any] = classifier("""Who are you voting for in 2020?""", candidate_labels="""politics, public health""" )
self.assertEqual(
SCREAMING_SNAKE_CASE_, {"""sequence""": ANY(SCREAMING_SNAKE_CASE_ ), """labels""": [ANY(SCREAMING_SNAKE_CASE_ ), ANY(SCREAMING_SNAKE_CASE_ )], """scores""": [ANY(SCREAMING_SNAKE_CASE_ ), ANY(SCREAMING_SNAKE_CASE_ )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ), 1.0 )
UpperCAmelCase_: Tuple = classifier("""Who are you voting for in 2020?""", candidate_labels=["""politics""", """public health"""] )
self.assertEqual(
SCREAMING_SNAKE_CASE_, {"""sequence""": ANY(SCREAMING_SNAKE_CASE_ ), """labels""": [ANY(SCREAMING_SNAKE_CASE_ ), ANY(SCREAMING_SNAKE_CASE_ )], """scores""": [ANY(SCREAMING_SNAKE_CASE_ ), ANY(SCREAMING_SNAKE_CASE_ )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ), 1.0 )
UpperCAmelCase_: str = classifier(
"""Who are you voting for in 2020?""", candidate_labels="""politics""", hypothesis_template="""This text is about {}""" )
self.assertEqual(SCREAMING_SNAKE_CASE_, {"""sequence""": ANY(SCREAMING_SNAKE_CASE_ ), """labels""": [ANY(SCREAMING_SNAKE_CASE_ )], """scores""": [ANY(SCREAMING_SNAKE_CASE_ )]} )
# https://github.com/huggingface/transformers/issues/13846
UpperCAmelCase_: Union[str, Any] = classifier(["""I am happy"""], ["""positive""", """negative"""] )
self.assertEqual(
SCREAMING_SNAKE_CASE_, [
{"""sequence""": ANY(SCREAMING_SNAKE_CASE_ ), """labels""": [ANY(SCREAMING_SNAKE_CASE_ ), ANY(SCREAMING_SNAKE_CASE_ )], """scores""": [ANY(SCREAMING_SNAKE_CASE_ ), ANY(SCREAMING_SNAKE_CASE_ )]}
for i in range(1 )
], )
UpperCAmelCase_: Dict = classifier(["""I am happy""", """I am sad"""], ["""positive""", """negative"""] )
self.assertEqual(
SCREAMING_SNAKE_CASE_, [
{"""sequence""": ANY(SCREAMING_SNAKE_CASE_ ), """labels""": [ANY(SCREAMING_SNAKE_CASE_ ), ANY(SCREAMING_SNAKE_CASE_ )], """scores""": [ANY(SCREAMING_SNAKE_CASE_ ), ANY(SCREAMING_SNAKE_CASE_ )]}
for i in range(2 )
], )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
classifier("""""", candidate_labels="""politics""" )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
classifier(SCREAMING_SNAKE_CASE_, candidate_labels="""politics""" )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
classifier("""Who are you voting for in 2020?""", candidate_labels="""""" )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
classifier("""Who are you voting for in 2020?""", candidate_labels=SCREAMING_SNAKE_CASE_ )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
classifier(
"""Who are you voting for in 2020?""", candidate_labels="""politics""", hypothesis_template="""Not formatting template""", )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
classifier(
"""Who are you voting for in 2020?""", candidate_labels="""politics""", hypothesis_template=SCREAMING_SNAKE_CASE_, )
self.run_entailment_id(SCREAMING_SNAKE_CASE_ )
def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCAmelCase_: int = zero_shot_classifier.model.config
UpperCAmelCase_: Optional[int] = config.labelaid
UpperCAmelCase_: str = zero_shot_classifier.entailment_id
UpperCAmelCase_: Union[str, Any] = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2}
self.assertEqual(zero_shot_classifier.entailment_id, -1 )
UpperCAmelCase_: int = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2}
self.assertEqual(zero_shot_classifier.entailment_id, 0 )
UpperCAmelCase_: Dict = {"""ENTAIL""": 0, """NON-ENTAIL""": 1}
self.assertEqual(zero_shot_classifier.entailment_id, 0 )
UpperCAmelCase_: Tuple = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0}
self.assertEqual(zero_shot_classifier.entailment_id, 2 )
UpperCAmelCase_: Any = original_labelaid
self.assertEqual(SCREAMING_SNAKE_CASE_, zero_shot_classifier.entailment_id )
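    # Note (descriptive, added for clarity): entailment_id resolves which NLI output
    # index means "entailment" by matching label names case-insensitively against the
    # prefix "entail"; a default LABEL_0/LABEL_1/... mapping yields -1, which
    # downstream indexing treats as the last position.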
@require_torch
def __snake_case (self ) -> str:
UpperCAmelCase_: Any = pipeline(
"""zero-shot-classification""", model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""", framework="""pt""", )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"""Who are you voting for in 2020?""" * 100, candidate_labels=["""politics""", """public health""", """science"""] )
@require_torch
def __snake_case (self ) -> Union[str, Any]:
UpperCAmelCase_: str = pipeline(
"""zero-shot-classification""", model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""", framework="""pt""", )
UpperCAmelCase_: Tuple = zero_shot_classifier(
"""Who are you voting for in 2020?""", candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ), {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3],
}, )
@require_tf
def __snake_case (self ) -> int:
UpperCAmelCase_: List[Any] = pipeline(
"""zero-shot-classification""", model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""", framework="""tf""", )
UpperCAmelCase_: Optional[Any] = zero_shot_classifier(
"""Who are you voting for in 2020?""", candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ), {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3],
}, )
@slow
@require_torch
def __snake_case (self ) -> Optional[int]:
UpperCAmelCase_: List[Any] = pipeline("""zero-shot-classification""", model="""roberta-large-mnli""", framework="""pt""" )
UpperCAmelCase_: Optional[int] = zero_shot_classifier(
"""Who are you voting for in 2020?""", candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ), {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9],
}, )
UpperCAmelCase_: Optional[Any] = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""", candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""], multi_label=SCREAMING_SNAKE_CASE_, )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ), {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
}, )
@slow
@require_tf
def __snake_case (self ) -> Optional[int]:
UpperCAmelCase_: List[str] = pipeline("""zero-shot-classification""", model="""roberta-large-mnli""", framework="""tf""" )
UpperCAmelCase_: Optional[Any] = zero_shot_classifier(
"""Who are you voting for in 2020?""", candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ), {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9],
}, )
UpperCAmelCase_: Any = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""", candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""], multi_label=SCREAMING_SNAKE_CASE_, )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ), {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
}, )
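# Hedged usage sketch (illustrative, not part of the test suite above): how the
# zero-shot-classification pipeline under test is typically invoked. The checkpoint
# name is an assumed example; the tests above do not pin it down.
def _zero_shot_usage_sketch():
    from transformers import pipeline

    classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
    result = classifier(
        "Who are you voting for in 2020?",
        candidate_labels=["politics", "public health", "science"],
    )
    # result["labels"] is sorted by descending score; with the default
    # multi_label=False the scores come from a softmax over candidates and sum to 1.
    return result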
| 82 | 1 |
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
UpperCamelCase__ = logging.getLogger()
def _a ( SCREAMING_SNAKE_CASE_ : Tuple ):
__lowerCAmelCase = {}
__lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , "all_results.json" )
if os.path.exists(SCREAMING_SNAKE_CASE_ ):
with open(SCREAMING_SNAKE_CASE_ , "r" ) as f:
__lowerCAmelCase = json.load(SCREAMING_SNAKE_CASE_ )
else:
raise ValueError(F"""can't find {path}""" )
return results
UpperCamelCase__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class a__ ( snake_case__ ):
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
import xla_spawn
__lowerCAmelCase = self.get_auto_remove_tmp_dir()
__lowerCAmelCase = f"""
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(_A , "argv" , _A ):
__lowerCAmelCase = time()
xla_spawn.main()
__lowerCAmelCase = time()
__lowerCAmelCase = get_results(_A )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 5_0_0 )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
import xla_spawn
__lowerCAmelCase = "\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split()
with patch.object(_A , "argv" , _A ):
xla_spawn.main()
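# Hedged mini-example (illustrative, not part of the tests above): the argv-patching
# trick both tests rely on. xla_spawn.main() reads sys.argv, so the tests swap it
# out in-process instead of shelling out to a subprocess.
def _argv_patch_demo():
    fake_argv = ["xla_spawn.py", "--num_cores=8", "train.py"]
    with patch.object(sys, "argv", fake_argv):
        assert sys.argv == fake_argv  # any sys.argv reader now sees the fake args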
| 92 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a__ ( snake_case__ , unittest.TestCase ):
_a : Dict = KandinskyImgaImgPipeline
_a : List[Any] = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""]
_a : str = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
_a : List[Any] = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
_a : int = False
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return 3_2
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return 3_2
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return self.time_input_dim
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return 1_0_0
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
torch.manual_seed(0 )
__lowerCAmelCase = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
__lowerCAmelCase = MultilingualCLIP(_A )
__lowerCAmelCase = text_encoder.eval()
return text_encoder
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
torch.manual_seed(0 )
__lowerCAmelCase = {
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
__lowerCAmelCase = UNetaDConditionModel(**_A )
return model
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
torch.manual_seed(0 )
__lowerCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.dummy_text_encoder
__lowerCAmelCase = self.dummy_tokenizer
__lowerCAmelCase = self.dummy_unet
__lowerCAmelCase = self.dummy_movq
__lowerCAmelCase = {
"num_train_timesteps": 1_0_0_0,
"beta_schedule": "linear",
"beta_start": 0.0_00_85,
"beta_end": 0.0_12,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
__lowerCAmelCase = DDIMScheduler(**_A )
__lowerCAmelCase = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def __SCREAMING_SNAKE_CASE( self , _A , _A=0 ):
"""simple docstring"""
__lowerCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_A ) ).to(_A )
__lowerCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_A )
# create init_image
__lowerCAmelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(_A ) ).to(_A )
__lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCAmelCase = Image.fromarray(np.uinta(_A ) ).convert("RGB" ).resize((2_5_6, 2_5_6) )
if str(_A ).startswith("mps" ):
__lowerCAmelCase = torch.manual_seed(_A )
else:
__lowerCAmelCase = torch.Generator(device=_A ).manual_seed(_A )
__lowerCAmelCase = {
"prompt": "horse",
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 6_4,
"width": 6_4,
"num_inference_steps": 1_0,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "cpu"
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**_A )
__lowerCAmelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__lowerCAmelCase = pipe(**self.get_dummy_inputs(_A ) )
__lowerCAmelCase = output.images
__lowerCAmelCase = pipe(
**self.get_dummy_inputs(_A ) , return_dict=_A , )[0]
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__lowerCAmelCase = np.array(
[0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_img2img_frog.npy" )
__lowerCAmelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
__lowerCAmelCase = "A red cartoon frog, 4k"
__lowerCAmelCase = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(_A )
__lowerCAmelCase = KandinskyImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1" , torch_dtype=torch.floataa )
__lowerCAmelCase = pipeline.to(_A )
pipeline.set_progress_bar_config(disable=_A )
__lowerCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 )
__lowerCAmelCase , __lowerCAmelCase = pipe_prior(
_A , generator=_A , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
__lowerCAmelCase = pipeline(
_A , image=_A , image_embeds=_A , negative_image_embeds=_A , generator=_A , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type="np" , )
__lowerCAmelCase = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(_A , _A )
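# Hedged usage sketch (illustrative, not part of the tests above): the two-stage flow
# the slow test exercises -- a prior pipeline turns the prompt into image embeddings,
# which the img2img pipeline then consumes together with an init image:
#
#   emb, neg_emb = pipe_prior(prompt, num_inference_steps=5).to_tuple()
#   out = pipeline(prompt, image=init_image, image_embeds=emb,
#                  negative_image_embeds=neg_emb, strength=0.2).images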
| 92 | 1 |
'''simple docstring'''
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
a : Dict = 2
class UpperCamelCase_ :
def __init__( self , *, # begin keyword-only arguments
A="<s>" , A="<pad>" , A="</s>" , A="<unk>" , A=None , ) -> Any:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = bos, unk, pad, eos
UpperCAmelCase : Union[str, Any] = []
UpperCAmelCase : Optional[int] = []
UpperCAmelCase : Dict = {}
UpperCAmelCase : List[Any] = self.add_symbol(A )
UpperCAmelCase : List[str] = self.add_symbol(A )
UpperCAmelCase : int = self.add_symbol(A )
UpperCAmelCase : List[Any] = self.add_symbol(A )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(A )
UpperCAmelCase : List[str] = len(self.symbols )
def __eq__( self , A ) -> Tuple:
return self.indices == other.indices
def __getitem__( self , A ) -> Optional[Any]:
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self ) -> Optional[int]:
return len(self.symbols )
def __contains__( self , A ) -> List[Any]:
return sym in self.indices
@classmethod
def _lowercase( cls , A ) -> Optional[Any]:
UpperCAmelCase : List[Any] = cls()
d.add_from_file(A )
return d
def _lowercase( self , A , A=1 , A=False ) -> List[str]:
if word in self.indices and not overwrite:
UpperCAmelCase : List[Any] = self.indices[word]
UpperCAmelCase : int = self.count[idx] + n
return idx
else:
UpperCAmelCase : Optional[int] = len(self.symbols )
UpperCAmelCase : List[str] = idx
self.symbols.append(A )
self.count.append(A )
return idx
def _lowercase( self , A ) -> Dict:
return 0
def _lowercase( self , A ) -> Optional[Any]:
if isinstance(A , A ):
try:
with open(A , """r""" , encoding="""utf-8""" ) as fd:
self.add_from_file(A )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception("""Incorrect encoding detected in {}, please rebuild the dataset""".format(A ) )
return
UpperCAmelCase : str = f.readlines()
UpperCAmelCase : Optional[Any] = self._load_meta(A )
for line in lines[indices_start_line:]:
try:
UpperCAmelCase , UpperCAmelCase : str = line.rstrip().rsplit(""" """ , 1 )
if field == "#fairseq:overwrite":
UpperCAmelCase : Any = True
UpperCAmelCase , UpperCAmelCase : str = line.rsplit(""" """ , 1 )
else:
UpperCAmelCase : Dict = False
UpperCAmelCase : List[Any] = int(A )
UpperCAmelCase : Any = line
if word in self and not overwrite:
raise RuntimeError(
"""Duplicate word found when loading Dictionary: '{}'. """
"""Duplicate words can overwrite earlier ones by adding the """
"""#fairseq:overwrite flag at the end of the corresponding row """
"""in the dictionary file. If using the Camembert model, please """
"""download an updated copy of the model file.""".format(A ) )
self.add_symbol(A , n=A , overwrite=A )
except ValueError:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt> [flags]'""" )
def __lowerCamelCase ( _lowercase ) -> Optional[Any]:
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
UpperCAmelCase : Optional[Any] = dict((re.sub(R"""@@$""" , """""" , _lowercase ), v) if k.endswith("""@@""" ) else (re.sub(R"""$""" , """</w>""" , _lowercase ), v) for k, v in d.items() )
UpperCAmelCase : int = """<s> <pad> </s> <unk>""".split()
# restore the special tokens
for k in keep_keys:
del da[F'''{k}</w>''']
UpperCAmelCase : Optional[Any] = d[k] # restore
return da
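# Hedged mini-example (illustrative): the key rewriter above turns a fairseq BPE
# vocab into word-boundary form, e.g.
#   {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 5, "er": 7}
#   -> {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le": 5, "er</w>": 7}
# (the four special tokens are restored unchanged).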
def __lowerCamelCase ( _lowercase , _lowercase ) -> Any:
# prep
if not os.path.exists(_lowercase ):
raise ValueError(F'''path {biogpt_checkpoint_path} does not exist!''' )
os.makedirs(_lowercase , exist_ok=_lowercase )
print(F'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
UpperCAmelCase : Optional[int] = os.path.join(_lowercase , """checkpoint.pt""" )
if not os.path.isfile(_lowercase ):
raise ValueError(F'''path to the file {checkpoint_file} does not exist!''' )
UpperCAmelCase : Optional[int] = torch.load(_lowercase , map_location="""cpu""" )
UpperCAmelCase : List[Any] = chkpt["""cfg"""]["""model"""]
# dicts
UpperCAmelCase : List[Any] = os.path.join(_lowercase , """dict.txt""" )
if not os.path.isfile(_lowercase ):
raise ValueError(F'''path to the file {dict_file} does not exist!''' )
UpperCAmelCase : Any = Dictionary.load(_lowercase )
UpperCAmelCase : Dict = rewrite_dict_keys(src_dict.indices )
UpperCAmelCase : Optional[int] = len(_lowercase )
UpperCAmelCase : Dict = os.path.join(_lowercase , VOCAB_FILES_NAMES["""vocab_file"""] )
print(F'''Generating {src_vocab_file} of {src_vocab_size} records''' )
with open(_lowercase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(_lowercase , ensure_ascii=_lowercase , indent=_lowercase ) )
# merges_file (bpecodes)
UpperCAmelCase : Tuple = os.path.join(_lowercase , """bpecodes""" )
if not os.path.isfile(_lowercase ):
raise ValueError(F'''path to the file {bpecodes_file} does not exist!''' )
UpperCAmelCase : List[Any] = os.path.join(_lowercase , VOCAB_FILES_NAMES["""merges_file"""] )
shutil.copyfile(_lowercase , _lowercase )
# model config
UpperCAmelCase : List[str] = os.path.join(_lowercase , """config.json""" )
UpperCAmelCase : Optional[Any] = {
"""activation_dropout""": args["""activation_dropout"""],
"""architectures""": ["""BioGptForCausalLM"""],
"""attention_probs_dropout_prob""": args["""attention_dropout"""],
"""bos_token_id""": 0,
"""eos_token_id""": 2,
"""hidden_act""": args["""activation_fn"""],
"""hidden_dropout_prob""": args["""dropout"""],
"""hidden_size""": args["""decoder_embed_dim"""],
"""initializer_range""": 0.02,
"""intermediate_size""": args["""decoder_ffn_embed_dim"""],
"""layer_norm_eps""": 1e-12,
"""layerdrop""": args["""decoder_layerdrop"""],
"""max_position_embeddings""": args["""max_target_positions"""],
"""model_type""": """biogpt""",
"""num_attention_heads""": args["""decoder_attention_heads"""],
"""num_hidden_layers""": args["""decoder_layers"""],
"""pad_token_id""": 1,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_decoder_input_output_embed"""],
"""vocab_size""": src_vocab_size,
}
# good hparam defaults to start with
print(F'''Generating {biogpt_model_config_file}''' )
with open(_lowercase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(_lowercase , ensure_ascii=_lowercase , indent=_lowercase ) )
# tokenizer config
UpperCAmelCase : Tuple = os.path.join(_lowercase , _lowercase )
UpperCAmelCase : Optional[Any] = {
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
"""model_max_length""": 1_0_2_4,
"""pad_token""": """<pad>""",
"""special_tokens_map_file""": None,
"""tokenizer_class""": """BioGptTokenizer""",
"""unk_token""": """<unk>""",
}
print(F'''Generating {biogpt_tokenizer_config_file}''' )
with open(_lowercase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(_lowercase , ensure_ascii=_lowercase , indent=_lowercase ) )
# model
UpperCAmelCase : Any = chkpt["""model"""]
# remove unneeded keys
UpperCAmelCase : Optional[int] = [
"""decoder.version""",
]
for k in ignore_keys:
model_state_dict.pop(_lowercase , _lowercase )
UpperCAmelCase : int = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith("""output_projection.weight""" ):
UpperCAmelCase : Tuple = model_state_dict.pop(_lowercase )
else:
UpperCAmelCase : Tuple = model_state_dict.pop(_lowercase )
UpperCAmelCase : List[Any] = BioGptConfig.from_pretrained(_lowercase )
UpperCAmelCase : Any = BioGptForCausalLM(_lowercase )
# check that it loads ok
model_new.load_state_dict(_lowercase )
# save
UpperCAmelCase : Union[str, Any] = os.path.join(_lowercase , _lowercase )
print(F'''Generating {pytorch_weights_dump_path}''' )
torch.save(_lowercase , _lowercase )
print("""Conversion is done!""" )
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
a : Optional[int] = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 338 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
a : List[Any] = logging.get_logger(__name__)
def __lowerCamelCase ( _lowercase ) -> List[Any]:
UpperCAmelCase : Dict = torch.load(_lowercase , map_location="""cpu""" )
if "model" in sd.keys():
UpperCAmelCase : Any = torch.load(_lowercase , map_location="""cpu""" )["""model"""]
# pop unnecessary weights
UpperCAmelCase : Union[str, Any] = [
"""decoder.version""",
"""decoder.output_projection.weight""",
]
for key in keys_to_delete:
if key in sd:
sd.pop(_lowercase )
UpperCAmelCase : Tuple = {
"""decoder.project_in_dim.weight""": """decoder.project_in.weight""",
"""decoder.project_out_dim.weight""": """decoder.project_out.weight""",
"""decoder.layer_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.layer_norm.bias""": """decoder.final_layer_norm.bias""",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
UpperCAmelCase : List[Any] = sd.pop(_lowercase )
UpperCAmelCase : Tuple = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
UpperCAmelCase : List[str] = sd[key]
# We split QKV in separate Q,K,V
UpperCAmelCase : Dict = key.replace(""".qkv_proj.""" , """.q_proj.""" )
UpperCAmelCase : Tuple = key.replace(""".qkv_proj.""" , """.k_proj.""" )
UpperCAmelCase : int = key.replace(""".qkv_proj.""" , """.v_proj.""" )
UpperCAmelCase : Dict = value.shape[0]
assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores the fused QKV weight in K, V, Q order despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = torch.split(_lowercase , depth // 3 , dim=0 )
UpperCAmelCase : Tuple = q
UpperCAmelCase : Tuple = k
UpperCAmelCase : Any = v
del sd[key]
return sd
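# Hedged mini-example (illustrative, not part of the conversion script): the QKV
# split above is a plain torch.split along dim 0 -- a fused (3 * d, d_in) weight
# becomes three (d, d_in) chunks.
def _qkv_split_demo():
    fused = torch.arange(18).reshape(6, 3).float()  # pretend fused projection, d = 2
    chunks = torch.split(fused, fused.shape[0] // 3, dim=0)
    assert all(chunk.shape == (2, 3) for chunk in chunks)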
@torch.no_grad()
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase=None ) -> Optional[Any]:
UpperCAmelCase : Tuple = load_checkpoint(_lowercase )
if config is not None:
UpperCAmelCase : Dict = OPTConfig.from_pretrained(_lowercase )
else:
UpperCAmelCase : int = OPTConfig()
UpperCAmelCase : List[Any] = OPTModel(_lowercase ).half().eval()
model.load_state_dict(_lowercase )
# Check results
Path(_lowercase ).mkdir(exist_ok=_lowercase )
model.save_pretrained(_lowercase )
if __name__ == "__main__":
a : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
a : Union[str, Any] = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 338 | 1 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
__A ='''https://www.indeed.co.in/jobs?q=mobile+app+development&l='''
def lowerCamelCase_ ( lowerCamelCase__ = "mumbai" ):
lowerCamelCase_ = BeautifulSoup(requests.get(url + location ).content , "html.parser" )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all("div" , attrs={"data-tn-component": "organicJob"} ):
lowerCamelCase_ = job.find("a" , attrs={"data-tn-element": "jobTitle"} ).text.strip()
lowerCamelCase_ = job.find("span" , {"class": "company"} ).text.strip()
yield job_title, company_name
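# Note (added for clarity): the function above is a generator -- the page is fetched
# once up front and (job title, company) pairs are parsed lazily as the caller
# iterates, as the __main__ block below demonstrates.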
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('''Bangalore'''), 1):
print(F"""Job {i:>2} is {job[0]} at {job[1]}""")
| 19 |
__A ={str(digit): digit**5 for digit in range(1_0)}
def lowerCamelCase_ ( lowerCamelCase__ ):
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(lowerCamelCase__ ) )
def lowerCamelCase_ ( ):
return sum(
number
for number in range(1_0_0_0 , 1_0_0_0_0_0_0 )
if number == digits_fifth_powers_sum(lowerCamelCase__ ) )
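# Worked example (added for clarity): 4150 qualifies, since
# 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.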
if __name__ == "__main__":
print(solution())
| 19 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase :List[str] = logging.get_logger(__name__)
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
A_ : Optional[int] = 1_92
A_ : List[str] = 7_68
A_ : int = 12
A_ : List[str] = 3
A_ : Tuple = [8_00, 13_33]
A_ : Optional[int] = False
elif yolos_name == "yolos_s_dWr":
A_ : List[str] = 3_30
A_ : List[str] = 14
A_ : Optional[int] = 6
A_ : Dict = 13_20
elif "yolos_s" in yolos_name:
A_ : Any = 3_84
A_ : Dict = 15_36
A_ : Union[str, Any] = 12
A_ : int = 6
elif "yolos_b" in yolos_name:
A_ : Union[str, Any] = [8_00, 13_44]
A_ : Dict = 91
A_ : int = """huggingface/label-files"""
A_ : Dict = """coco-detection-id2label.json"""
A_ : List[str] = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
A_ : Optional[int] = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
A_ : Dict = idalabel
A_ : List[Any] = {v: k for k, v in idalabel.items()}
return config
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A_ : Optional[Any] = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
A_ : Union[str, Any] = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
A_ : Any = in_proj_weight[: config.hidden_size, :]
A_ : List[Any] = in_proj_bias[: config.hidden_size]
A_ : str = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A_ : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A_ : List[Any] = in_proj_weight[-config.hidden_size :, :]
A_ : Any = in_proj_bias[-config.hidden_size :]
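# Note (descriptive, added for clarity): timm stores each attention projection as a
# single fused qkv matrix of shape (3 * hidden_size, hidden_size); the slicing above
# peels off the query, key, and value blocks in that order.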
def a ( lowerCamelCase__ ):
'''simple docstring'''
if "backbone" in name:
A_ : List[str] = name.replace("""backbone""" , """vit""" )
if "cls_token" in name:
A_ : Tuple = name.replace("""cls_token""" , """embeddings.cls_token""" )
if "det_token" in name:
A_ : Optional[Any] = name.replace("""det_token""" , """embeddings.detection_tokens""" )
if "mid_pos_embed" in name:
A_ : int = name.replace("""mid_pos_embed""" , """encoder.mid_position_embeddings""" )
if "pos_embed" in name:
A_ : Optional[int] = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
A_ : int = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "blocks" in name:
A_ : Tuple = name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
A_ : List[str] = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
A_ : int = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
A_ : Any = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
A_ : int = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
A_ : Optional[Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
A_ : str = name.replace("""mlp.fc2""" , """output.dense""" )
if "class_embed" in name:
A_ : Tuple = name.replace("""class_embed""" , """class_labels_classifier""" )
if "bbox_embed" in name:
A_ : Union[str, Any] = name.replace("""bbox_embed""" , """bbox_predictor""" )
if "vit.norm" in name:
A_ : Tuple = name.replace("""vit.norm""" , """vit.layernorm""" )
return name
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
A_ : Optional[Any] = orig_state_dict.pop(lowerCamelCase__ )
if "qkv" in key:
A_ : Dict = key.split(""".""" )
A_ : Optional[Any] = int(key_split[2] )
A_ : str = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
A_ : Tuple = val[:dim, :]
A_ : Optional[Any] = val[
dim : dim * 2, :
]
A_ : Dict = val[-dim:, :]
else:
A_ : int = val[:dim]
A_ : List[Any] = val[dim : dim * 2]
A_ : Any = val[-dim:]
else:
A_ : Dict = val
return orig_state_dict
def a ( ):
'''simple docstring'''
A_ : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ : Dict = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
return im
@torch.no_grad()
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
'''simple docstring'''
A_ : Any = get_yolos_config(lowerCamelCase__ )
# load original state_dict
A_ : Any = torch.load(lowerCamelCase__ , map_location="""cpu""" )["""model"""]
# load 🤗 model
A_ : Dict = YolosForObjectDetection(lowerCamelCase__ )
model.eval()
A_ : Any = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ )
# Check outputs on an image, prepared by YolosImageProcessor
A_ : str = 8_00 if yolos_name != """yolos_ti""" else 5_12
A_ : Optional[Any] = YolosImageProcessor(format="""coco_detection""" , size=lowerCamelCase__ )
A_ : List[str] = image_processor(images=prepare_img() , return_tensors="""pt""" )
A_ : Any = model(**lowerCamelCase__ )
A_ : str = outputs.logits, outputs.pred_boxes
A_ : Tuple = None, None
if yolos_name == "yolos_ti":
A_ : Dict = torch.tensor(
[[-39.5_022, -11.9_820, -17.6_888], [-29.9_574, -9.9_769, -17.7_691], [-42.3_281, -20.7_200, -30.6_294]] )
A_ : Dict = torch.tensor(
[[0.4_021, 0.0_836, 0.7_979], [0.0_184, 0.2_609, 0.0_364], [0.1_781, 0.2_004, 0.2_095]] )
elif yolos_name == "yolos_s_200_pre":
A_ : Optional[Any] = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] )
A_ : int = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] )
elif yolos_name == "yolos_s_300_pre":
A_ : List[str] = torch.tensor(
[[-36.2_220, -14.4_385, -23.5_457], [-35.6_970, -14.7_583, -21.3_935], [-31.5_939, -13.6_042, -16.8_049]] )
A_ : Union[str, Any] = torch.tensor(
[[0.7_614, 0.2_316, 0.4_728], [0.7_168, 0.4_495, 0.3_855], [0.4_996, 0.1_466, 0.9_996]] )
elif yolos_name == "yolos_s_dWr":
A_ : Tuple = torch.tensor(
[[-42.8_668, -24.1_049, -41.1_690], [-34.7_456, -14.1_274, -24.9_194], [-33.7_898, -12.1_946, -25.6_495]] )
A_ : str = torch.tensor(
[[0.5_587, 0.2_773, 0.0_605], [0.5_004, 0.3_014, 0.9_994], [0.4_999, 0.1_548, 0.9_994]] )
elif yolos_name == "yolos_base":
A_ : Dict = torch.tensor(
[[-40.6_064, -24.3_084, -32.6_447], [-55.1_990, -30.7_719, -35.5_877], [-51.4_311, -33.3_507, -35.6_462]] )
A_ : Union[str, Any] = torch.tensor(
[[0.5_555, 0.2_794, 0.0_655], [0.9_049, 0.2_664, 0.1_894], [0.9_183, 0.1_984, 0.1_635]] )
else:
raise ValueError(f'Unknown yolos_name: {yolos_name}' )
assert torch.allclose(logits[0, :3, :3] , lowerCamelCase__ , atol=1E-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , lowerCamelCase__ , atol=1E-4 )
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(f'Saving model {yolos_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
A_ : Optional[int] = {
"""yolos_ti""": """yolos-tiny""",
"""yolos_s_200_pre""": """yolos-small""",
"""yolos_s_300_pre""": """yolos-small-300""",
"""yolos_s_dWr""": """yolos-small-dwr""",
"""yolos_base""": """yolos-base""",
}
print("""Pushing to the hub...""" )
A_ : int = model_mapping[yolos_name]
image_processor.push_to_hub(lowerCamelCase__ , organization="""hustvl""" )
model.push_to_hub(lowerCamelCase__ , organization="""hustvl""" )
if __name__ == "__main__":
lowerCamelCase :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCamelCase :List[str] = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 364 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
lowerCamelCase :str = logging.get_logger(__name__)
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Tuple = ['pixel_values']
def __init__(self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BILINEAR , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = True , **lowercase , ):
super().__init__(**lowercase )
A_ : Union[str, Any] = size if size is not None else {"""shortest_edge""": 224}
A_ : Union[str, Any] = get_size_dict(lowercase , default_to_square=lowercase )
A_ : Optional[Any] = crop_size if crop_size is not None else {"""height""": 256, """width""": 256}
A_ : Tuple = get_size_dict(lowercase , param_name="""crop_size""" )
A_ : List[Any] = do_resize
A_ : List[str] = size
A_ : Dict = resample
A_ : int = do_rescale
A_ : str = rescale_factor
A_ : Tuple = do_center_crop
A_ : Tuple = crop_size
A_ : List[str] = do_flip_channel_order
def _a (self , lowercase , lowercase , lowercase = PIL.Image.BILINEAR , lowercase = None , **lowercase , ):
A_ : Any = get_size_dict(lowercase , default_to_square=lowercase )
if "shortest_edge" not in size:
raise ValueError(F'The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}' )
A_ : Any = get_resize_output_image_size(lowercase , size=size["""shortest_edge"""] , default_to_square=lowercase )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ):
A_ : Tuple = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(lowercase , size=(size["""height"""], size["""width"""]) , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ):
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase = None ):
return flip_channel_order(lowercase , data_format=lowercase )
def _a (self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ):
A_ : str = do_resize if do_resize is not None else self.do_resize
A_ : Optional[int] = resample if resample is not None else self.resample
A_ : str = do_rescale if do_rescale is not None else self.do_rescale
A_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : str = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ : Dict = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
A_ : Union[str, Any] = size if size is not None else self.size
A_ : Dict = get_size_dict(lowercase , default_to_square=lowercase )
A_ : Any = crop_size if crop_size is not None else self.crop_size
A_ : Union[str, Any] = get_size_dict(lowercase , param_name="""crop_size""" )
A_ : Union[str, Any] = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
# All transformations expect numpy arrays.
A_ : Optional[Any] = [to_numpy_array(lowercase ) for image in images]
if do_resize:
A_ : List[Any] = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images]
if do_center_crop:
A_ : str = [self.center_crop(image=lowercase , size=lowercase ) for image in images]
if do_rescale:
A_ : Dict = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
A_ : List[str] = [self.flip_channel_order(image=lowercase ) for image in images]
A_ : List[str] = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
A_ : str = {"""pixel_values""": images}
return BatchFeature(data=lowercase , tensor_type=lowercase )
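    # Note (descriptive, added for clarity): the method below upsamples each sample's
    # logits to its original (height, width) with bilinear interpolation, then takes
    # an argmax over the class dimension to produce per-pixel label maps.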
def _a (self , lowercase , lowercase = None ):
A_ : Optional[Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase ) != len(lowercase ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(lowercase ):
A_ : Dict = target_sizes.numpy()
A_ : Union[str, Any] = []
for idx in range(len(lowercase ) ):
A_ : Union[str, Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=lowercase )
A_ : Any = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowercase )
else:
A_ : str = logits.argmax(dim=1 )
A_ : Optional[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 135 | 0 |
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
if "cls_token" in name:
lowercase : List[Any] = name.replace("""cls_token""" , """vit.embeddings.cls_token""" )
if "mask_token" in name:
lowercase : Any = name.replace("""mask_token""" , """decoder.mask_token""" )
if "decoder_pos_embed" in name:
lowercase : str = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
lowercase : List[str] = name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
lowercase : Tuple = name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
lowercase : int = name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" )
if "decoder_blocks" in name:
lowercase : Tuple = name.replace("""decoder_blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
lowercase : List[Any] = name.replace("""blocks""" , """vit.encoder.layer""" )
if "attn.proj" in name:
lowercase : List[str] = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
lowercase : Union[str, Any] = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
lowercase : Optional[Any] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
lowercase : Union[str, Any] = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
lowercase : Dict = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowercase : Dict = name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
lowercase : List[str] = name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
lowercase : Dict = name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
lowercase : List[str] = name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name:
lowercase : Tuple = name.replace("""norm.weight""" , """vit.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name:
lowercase : int = name.replace("""norm.bias""" , """vit.layernorm.bias""" )
return name
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
for key in orig_state_dict.copy().keys():
lowercase : List[Any] = orig_state_dict.pop(SCREAMING_SNAKE_CASE__ )
if "qkv" in key:
lowercase : int = key.split(""".""" )
lowercase : List[str] = int(key_split[1] )
if "decoder_blocks" in key:
lowercase : Tuple = config.decoder_hidden_size
lowercase : int = """decoder.decoder_layers."""
if "weight" in key:
lowercase : List[Any] = val[:dim, :]
lowercase : Tuple = val[dim : dim * 2, :]
lowercase : List[Any] = val[-dim:, :]
elif "bias" in key:
lowercase : str = val[:dim]
lowercase : Dict = val[dim : dim * 2]
lowercase : Union[str, Any] = val[-dim:]
else:
lowercase : Tuple = config.hidden_size
lowercase : Union[str, Any] = """vit.encoder.layer."""
if "weight" in key:
lowercase : Tuple = val[:dim, :]
lowercase : List[str] = val[dim : dim * 2, :]
lowercase : Dict = val[-dim:, :]
elif "bias" in key:
lowercase : Any = val[:dim]
lowercase : str = val[dim : dim * 2]
lowercase : Union[str, Any] = val[-dim:]
else:
lowercase : Union[str, Any] = val
return orig_state_dict
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int:
lowercase : int = ViTMAEConfig()
if "large" in checkpoint_url:
lowercase : Dict = 1_024
lowercase : str = 4_096
lowercase : Optional[Any] = 24
lowercase : Optional[Any] = 16
elif "huge" in checkpoint_url:
lowercase : int = 14
lowercase : List[Any] = 1_280
lowercase : int = 5_120
lowercase : List[Any] = 32
lowercase : Any = 16
lowercase : List[str] = ViTMAEForPreTraining(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location="""cpu""" )["""model"""]
lowercase : Tuple = ViTMAEImageProcessor(size=config.image_size )
lowercase : Optional[int] = convert_state_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
model.eval()
lowercase : Union[str, Any] = """https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"""
lowercase : Union[str, Any] = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
lowercase : Optional[Any] = ViTMAEImageProcessor(size=config.image_size )
lowercase : List[Any] = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
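    # Note (added for clarity): the fixed seed pins ViTMAE's random patch masking, so
    # the forward pass below is deterministic and comparable to the expected slices.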
lowercase : int = model(**SCREAMING_SNAKE_CASE__ )
lowercase : str = outputs.logits
if "large" in checkpoint_url:
lowercase : List[Any] = torch.tensor(
[[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]] )
elif "huge" in checkpoint_url:
lowercase : Tuple = torch.tensor(
[[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]] )
else:
lowercase : List[str] = torch.tensor(
[[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowercase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
lowercase : List[Any] = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 20 |
import os
import numpy
import onnx
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
lowercase : int = a.name
lowercase : Any = b.name
lowercase : Optional[Any] = """"""
lowercase : Dict = """"""
lowercase : int = a == b
lowercase : int = name_a
lowercase : List[str] = name_b
return res
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_graph_replace_input_with(node_proto.attribute[1].g , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
for n in graph_proto.node:
_node_replace_input_with(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
lowercase : Any = list(model.graph.initializer )
lowercase : Dict = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
lowercase : Union[str, Any] = inits[i].name
lowercase : Dict = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def remove_dup_initializers(onnx_file_path) -> str:
    """
    Removes duplicate initializers from an ONNX model and saves the optimized
    model next to the original, returning the new file path.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # FLOAT
                    mem_size *= 4
                elif dtype == 6:  # INT32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # INT64 or DOUBLE
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
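A minimal usage sketch of the helper above (the ONNX file path is an assumption):

# Hypothetical path; remove_dup_initializers writes "optimized_<name>" next to it.
optimized_path = remove_dup_initializers("exported/model.onnx")
print("optimized model written to", optimized_path)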
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    """
    Given two of the three carrier concentrations of a semiconductor (electron,
    hole, intrinsic), compute the missing one via the mass action law
    n * p = n_i**2. Pass 0 for the unknown value.
    """
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        # Unreachable: exactly one of the three values is zero at this point.
        return (-1, -1)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
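A quick worked example of the mass action law computation (the values are illustrative, in arbitrary units):

# With n = 25 and p = 100, the intrinsic concentration is sqrt(n * p) = 50.
print(carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0))
# -> ('intrinsic_conc', 50.0)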
def catalan(number: int) -> int:
    """
    Returns the `number`-th Catalan number (1-indexed), computed iteratively
    from the recurrence C(n) = C(n - 1) * (4n - 2) / (n + 1).
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)

    current_number = 1

    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1

    return current_number
if __name__ == "__main__":
    import doctest

    doctest.testmod()
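As a sanity check on small inputs, the function reproduces the start of the Catalan sequence:

# The first five Catalan numbers are 1, 1, 2, 5, 14.
assert [catalan(n) for n in range(1, 6)] == [1, 1, 2, 5, 14]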
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    """Configuration of a MobileNetV2 model."""

    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
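A brief usage sketch for the configuration class above (the overridden values are illustrative, not recommended settings):

# Build a narrower MobileNetV2 configuration and inspect a few fields.
config = MobileNetV2Config(depth_multiplier=0.75, image_size=160)
print(config.model_type, config.depth_multiplier, config.image_size)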
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()
def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
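The launcher assumes the target script exposes a `_mp_fn` entry point; a minimal sketch of such a script (everything here is hypothetical):

# Minimal training-script skeleton this launcher can spawn: xmp.spawn calls
# _mp_fn(index) once per TPU process, with sys.argv already patched.
def _mp_fn(index):
    print(f"worker {index} started")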
"""ALBERT model configuration."""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
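A short usage sketch (the overridden sizes happen to be the albert-base-v2 values, given only as an example):

# Instantiate a smaller ALBERT configuration than the albert-xxlarge defaults.
config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)
print(config.model_type, config.hidden_size)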
"""Utility that checks the custom inits of Transformers are in sync: the `_import_structure`
half and the `TYPE_CHECKING` half of each init must define the same objects."""
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init file and parse (per backend) the objects defined in the
    `_import_structure` half and in the `TYPE_CHECKING` half.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}

    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}

    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check all inits in the repo define the same objects in both halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    """Check every submodule of Transformers is registered in the main init."""
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
    check_all_inits()
    check_submodules()
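For context, a minimal sketch of the lazy-init layout this script validates (module and class names are hypothetical): both halves must declare the same objects, or analyze_results reports a difference.

# Sketch of a lazy __init__.py in the format parse_init expects.
from typing import TYPE_CHECKING

_import_structure = {"modeling_foo": ["FooModel"]}

if TYPE_CHECKING:
    from .modeling_foo import FooModel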
"""simple docstring"""
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head #
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[layer] = state_dict[layer]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
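A quick sanity check one might run on the resulting file (the path is the script's default dump location, used here only as an example):

# Standalone snippet: inspect the extracted checkpoint.
import torch

compressed_sd = torch.load("serialization_dir/tf_roberta_048131723.pth", map_location="cpu")
print(len(compressed_sd), "tensors;", sorted(compressed_sd)[:3])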
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )

    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )

        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)

        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
"""simple docstring"""
def lowercase__(A , A ) ->Optional[Any]:
"""simple docstring"""
assert x is not None
assert y is not None
lowercase__ : int= len(A )
lowercase__ : Optional[int]= len(A )
# declaring the array for storing the dp values
lowercase__ : Dict= [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
for i in range(1 , m + 1 ):
for j in range(1 , n + 1 ):
lowercase__ : Any= 1 if x[i - 1] == y[j - 1] else 0
lowercase__ : Dict= max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
lowercase__ : Optional[int]= ""
lowercase__, lowercase__ : List[str]= m, n
while i > 0 and j > 0:
lowercase__ : Optional[Any]= 1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
lowercase__ : List[Any]= x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"
    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
    import doctest

    doctest.testmod()
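Another quick check, with a second pair of strings (the returned subsequence may be any one of the equally long answers):

# "ABCBDAB" vs "BDCAB" has an LCS of length 4, e.g. "BCAB" or "BDAB".
ln, subseq = longest_common_subsequence("ABCBDAB", "BDCAB")
print(ln, subseq)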
"""simple docstring"""
from __future__ import annotations
def lowercase__(A ) ->int:
"""simple docstring"""
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(A ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(A ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
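A worked example with the classic 3 x 3 grid; note that the function mutates its argument in place:

# The cheapest path is 1 -> 3 -> 1 -> 1 -> 1, so the result is 7.
grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
print(minimum_cost_path(grid))  # 7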