| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (87 to 55.2k chars) | int64 (0 to 349) | string (135 to 49.1k chars) | int64 (0 to 349) | int64 (0 to 1) |
"""simple docstring"""
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def snake_case_ ( A_ : str, A_ : str, **A_ : Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained(A_, **A_ )
_lowerCamelCase : List[Any] = AutoModelForSeqaSeqLM.from_config(A_ )
model.save_pretrained(A_ )
AutoTokenizer.from_pretrained(A_ ).save_pretrained(A_ )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
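# A minimal sketch of invoking the script above; the model identifier and output
# directory are illustrative, not taken from the source:
#
#     python save_randomly_initialized_version.py t5-small ./t5-small-random
#
# fire maps the positional arguments onto config_name and save_dir, and any extra
# --key=value flags onto **config_kwargs.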
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}


class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the normalizer if the requested casing/accent options differ
        # from the ones baked into the serialized tokenizer.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        # The custom jieba pre-tokenizer is not picklable; swap in a standard
        # BERT pre-tokenizer before serializing.
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        # The jieba pre-tokenizer cannot be serialized; replace it before saving.
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
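# A minimal usage sketch for the tokenizer above, assuming the checkpoints listed
# in PRETRAINED_VOCAB_FILES_MAP are available on the Hub (the sentence is illustrative):
#
#     from transformers import RoFormerTokenizerFast
#
#     tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#     encoding = tokenizer("今天天气非常好。")
#     print(encoding.input_ids)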
import os
import tempfile
import unittest

import numpy as np

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax


if is_flax_available():
    from flax.core.frozen_dict import unfreeze
    from flax.traverse_util import flatten_dict

    from transformers import FlaxBertModel

    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8


@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")


def check_models_equal(model_1, model_2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal


@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
import unittest

from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        SqueezeBertForMaskedLM,
        SqueezeBertForMultipleChoice,
        SqueezeBertForQuestionAnswering,
        SqueezeBertForSequenceClassification,
        SqueezeBertForTokenClassification,
        SqueezeBertModel,
    )


class SqueezeBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )

    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_squeezebert_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_squeezebert_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_squeezebert_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_squeezebert_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_squeezebert_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
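# A sketch of running just this module with pytest; the file path follows the
# transformers test-suite layout and is illustrative:
#
#     pytest tests/models/squeezebert/test_modeling_squeezebert.py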
def binary_recursive(decimal: int) -> str:
    """Return the binary digits of a non-negative integer, computed recursively."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Convert an integer string (optionally negative) to a '0b'-prefixed binary string."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
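# Quick checks of the conversion above (values verified by hand):
#
#     >>> main("5")
#     '0b101'
#     >>> main("-32")
#     '-0b100000'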
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
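# For context, TextDatasetReader is the class behind the public text loader; a
# minimal sketch of the equivalent high-level API (the file name is illustrative):
#
#     from datasets import load_dataset
#
#     dataset = load_dataset("text", data_files={"train": "train.txt"})
#     print(dataset["train"].column_names)  # ["text"]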
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple

import yaml


class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])

    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Load metadata from the YAML block at the top of a README.md file."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Load and validate metadata from a YAML string, rejecting duplicate keys."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")


known_task_ids = {
    "image-classification": [],
    "translation": [],
    "image-segmentation": [],
    "fill-mask": [],
    "automatic-speech-recognition": [],
    "token-classification": [],
    "sentence-similarity": [],
    "audio-classification": [],
    "question-answering": [],
    "summarization": [],
    "zero-shot-classification": [],
    "table-to-text": [],
    "feature-extraction": [],
    "other": [],
    "multiple-choice": [],
    "text-classification": [],
    "text-to-image": [],
    "text2text-generation": [],
    "zero-shot-image-classification": [],
    "tabular-classification": [],
    "tabular-regression": [],
    "image-to-image": [],
    "tabular-to-text": [],
    "unconditional-image-generation": [],
    "text-retrieval": [],
    "text-to-speech": [],
    "object-detection": [],
    "audio-to-audio": [],
    "text-generation": [],
    "conversational": [],
    "table-question-answering": [],
    "visual-question-answering": [],
    "image-to-text": [],
    "reinforcement-learning": [],
    "voice-activity-detection": [],
    "time-series-forecasting": [],
    "document-question-answering": [],
}


if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
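# A minimal round-trip sketch for DatasetMetadata; the YAML fields are illustrative:
#
#     metadata = DatasetMetadata.from_yaml_string("license: mit\ntask_categories:\n- text-classification\n")
#     print(metadata["license"])        # "mit"
#     print(metadata.to_yaml_string())  # re-serialized YAML block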
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

import diffusers
from diffusers import (
    AutoencoderKL,
    EulerDiscreteScheduler,
    StableDiffusionLatentUpscalePipeline,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])


class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # schedulers listed above are not supported by this pipeline
                continue
            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)


@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image_upscaler(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        upscaled_image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - upscaled_image).max()) < 5e-2
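# A sketch of the two-stage generation the integration tests exercise, using the
# checkpoints referenced above (requires a CUDA GPU):
#
#     pipe = StableDiffusionPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
#     ).to("cuda")
#     upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
#         "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
#     ).to("cuda")
#     latents = pipe("an astronaut", output_type="latent").images
#     image = upscaler(
#         prompt="an astronaut", image=latents, num_inference_steps=20, guidance_scale=0
#     ).images[0]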
def split(string: str, separator: str = " ") -> list:
    """Split a string on a single-character separator, without using str.split."""
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words


if __name__ == "__main__":
    from doctest import testmod

    testmod()
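# Quick check of the splitter above (output verified by hand):
#
#     >>> split("apple#banana#cherry", separator="#")
#     ['apple', 'banana', 'cherry']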
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}


class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape


class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
def z_function(input_str: str) -> list:
    """Compute the Z-array: for each index, the length of the longest substring
    starting there that matches a prefix of the whole string."""
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if the new index's result gives us a larger right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list, s: str) -> bool:
    """Check if we can keep extending the match at index i."""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """Count occurrences of `pattern` in `input_str`."""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if the value is at least the length of the pattern string,
        # that index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1
    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
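# Quick checks of the Z-function above (values verified by hand):
#
#     >>> z_function("abab")
#     [0, 0, 2, 0]
#     >>> find_pattern("ab", "abab")
#     2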
"""simple docstring"""
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class UpperCamelCase__ ( A__ ):
"""simple docstring"""
def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any=1_3 , SCREAMING_SNAKE_CASE_ : List[str]=7 , SCREAMING_SNAKE_CASE_ : int=True , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : Dict=False , SCREAMING_SNAKE_CASE_ : List[Any]=True , SCREAMING_SNAKE_CASE_ : Any=9_9 , SCREAMING_SNAKE_CASE_ : Optional[int]=3_2 , SCREAMING_SNAKE_CASE_ : Any=5 , SCREAMING_SNAKE_CASE_ : List[Any]=4 , SCREAMING_SNAKE_CASE_ : Dict=6_4 , SCREAMING_SNAKE_CASE_ : Optional[Any]="gelu" , SCREAMING_SNAKE_CASE_ : Tuple=0.1 , SCREAMING_SNAKE_CASE_ : Any=0.1 , SCREAMING_SNAKE_CASE_ : List[Any]=5_1_2 , SCREAMING_SNAKE_CASE_ : Dict=1_6 , SCREAMING_SNAKE_CASE_ : Optional[Any]=2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.02 , SCREAMING_SNAKE_CASE_ : List[str]=3 , SCREAMING_SNAKE_CASE_ : Optional[Any]=4 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , SCREAMING_SNAKE_CASE_ : Tuple=2 , SCREAMING_SNAKE_CASE_ : List[str]=2 , SCREAMING_SNAKE_CASE_ : str=2 , SCREAMING_SNAKE_CASE_ : Dict=2 , SCREAMING_SNAKE_CASE_ : Optional[Any]=4 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1 , ):
lowerCAmelCase_ : List[Any] = parent
lowerCAmelCase_ : Optional[int] = batch_size
lowerCAmelCase_ : Dict = seq_length
lowerCAmelCase_ : Dict = is_training
lowerCAmelCase_ : Optional[int] = use_input_mask
lowerCAmelCase_ : int = use_token_type_ids
lowerCAmelCase_ : Optional[int] = use_labels
lowerCAmelCase_ : Tuple = vocab_size
lowerCAmelCase_ : Any = hidden_size
lowerCAmelCase_ : Union[str, Any] = num_hidden_layers
lowerCAmelCase_ : Tuple = num_attention_heads
lowerCAmelCase_ : List[Any] = intermediate_size
lowerCAmelCase_ : List[str] = hidden_act
lowerCAmelCase_ : List[str] = hidden_dropout_prob
lowerCAmelCase_ : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase_ : str = max_position_embeddings
lowerCAmelCase_ : str = type_vocab_size
lowerCAmelCase_ : List[str] = type_sequence_label_size
lowerCAmelCase_ : Optional[Any] = initializer_range
lowerCAmelCase_ : Tuple = num_labels
lowerCAmelCase_ : List[Any] = num_choices
lowerCAmelCase_ : Dict = scope
lowerCAmelCase_ : int = q_groups
lowerCAmelCase_ : Tuple = k_groups
lowerCAmelCase_ : List[Any] = v_groups
lowerCAmelCase_ : Tuple = post_attention_groups
lowerCAmelCase_ : int = intermediate_groups
lowerCAmelCase_ : List[Any] = output_groups
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )
    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
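# To run just these tests locally (the path is illustrative and may differ by checkout):
#   python -m pytest tests/models/squeezebert/test_modeling_squeezebert.py -k "not slow"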
| 224 |
from __future__ import annotations

from math import pi, sqrt


def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
    """Return the resonant frequency of an LC circuit, f = 1 / (2 * pi * sqrt(L * C))."""
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
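# Illustrative check: for inductance=10 H and capacitance=5 F,
# resonant_frequency(10, 5) -> ('Resonant frequency', 0.022507907903927652),
# i.e. 1 / (2 * pi * sqrt(50)).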
| 18 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main() -> None:
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
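# Typical invocations once the package is installed:
#   accelerate config                             # interactive setup, handled by get_config_parser
#   accelerate launch train.py --num_processes 2  # distributed launch of a user script
#   accelerate env                                # print environment info for bug reports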
| 58 |
def binary_insertion_sort(collection: list) -> list:
    """Sort a list in place, using binary search to locate each insertion point."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1

        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
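# Quick sanity check:
#   binary_insertion_sort([5, 2, 4, 1]) -> [1, 2, 4, 5]
# Binary search trims the comparisons per element to O(log i), but the shifting
# loop keeps the overall worst case at O(n^2), the same as plain insertion sort.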
| 18 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
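# Note on crop_pct (my reading of the timm-style eval transform, worth verifying
# against the processor docs): the image is first resized so its short edge is
# crop_size / crop_pct (30 / 0.9 here) and then center-cropped to crop_size,
# which is why every encoded image above comes out at 30x30.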
| 348 |
from collections.abc import Sequence
from queue import Queue


class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"
class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Set the element at index i to val."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Return fn applied over the inclusive index range [i, j]."""
        return self._query_range(self.root, i, j)
    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)
    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)
    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val

        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)
    def traverse(self):
        """Yield the nodes of the tree in breadth-first order."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
    import operator

    for fn in [operator.add, max, min]:
        print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
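# Complexity recap: building the tree is O(n); update() descends one root-to-leaf
# path and query_range() visits O(log n) nodes, so both run in O(log n) for any
# associative combining function.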
| 18 | 0 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)


_CITATION = '''\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
'''
_DESCRIPTION = '''\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project\'s README at https://github.com/google-research/bleurt#readme for more information.
'''
_KWARGS_DESCRIPTION = '''
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
\'scores\': List of scores.
Examples:
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> bleurt = datasets.load_metric("bleurt")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results["scores"]])
[1.03, 1.04]
'''
CHECKPOINT_URLS = {
'''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''',
'''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''',
'''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''',
'''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''',
'''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''',
'''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''',
'''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''',
'''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''',
'''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''',
'''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class BLEURT(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/google-research/bleurt''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/bleurt'''] , reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , )
    def _download_and_prepare(self, dl_manager):
        # check that the config name specifies a valid BLEURT checkpoint
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            checkpoint_name = "bleurt-base-128"
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))
    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
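# Illustrative use with an explicit checkpoint (requires the `bleurt` pip package):
#   bleurt = datasets.load_metric("bleurt", "BLEURT-20")
#   results = bleurt.compute(predictions=["hi there"], references=["hi there"])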
| 226 |
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm for the greatest common divisor."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive variant: gcd(a, b) = gcd(b, a mod b)."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")

    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
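# Worked trace for euclidean_gcd(48, 18):
#   (48, 18) -> (18, 12) -> (12, 6) -> (6, 0) -> returns 6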
| 18 | 0 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class FlaxPipelineTests(unittest.TestCase):
    def test_dummy_all_tpus(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))

        assert len(images_pil) == num_samples
    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2383808.2) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2373516.75) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2373516.75) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()

        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2347693.5) < 5e-1
    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice_no_eff = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice_no_eff).max() < 1e-2
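# The sharding idiom used throughout these tests: replicate() copies the params to
# every local device, shard() splits the batch across devices, and jit=True runs
# the pipeline under pmap, so each device denoises its own slice of the prompts.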
| 303 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
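# With this pattern, `import transformers.models.vit_mae` stays cheap: the heavy
# torch/TF submodules listed in _import_structure are only imported on first
# attribute access, because _LazyModule replaces this module in sys.modules.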
| 18 | 0 |
"""simple docstring"""
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    """Return True if `n` reads the same forwards and backwards."""
    n = str(n)
    return n == n[::-1]


def solution(limit: int = 1_000_000) -> int:
    """Sum the numbers below `limit` that are palindromic in base 10 and in base 2."""
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(input().strip())))
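# Example: 585 qualifies (585 reversed is 585, and bin(585) -> "0b1001001001").
# For the default limit of one million, solution() returns 872187.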
| 72 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type: str,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
    args = parser.parse_args()

    dest_dir = Path(args.dest)
    dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
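# Example invocation (the model identifiers are illustrative):
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-consolidated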
| 18 | 0 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Map each choice to its string form so argparse can parse the string back to the original value."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important: don't use a dict as a default param in the function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """An ArgumentParser that builds its arguments from the fields of one or more dataclasses."""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
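# Minimal usage sketch (the dataclass and flags are illustrative):
#   @dataclasses.dataclass
#   class TrainingArgs:
#       learning_rate: float = 3e-4
#       do_eval: bool = True  # a True default also generates a --no_do_eval complement flag
#
#   parser = HfArgumentParser(TrainingArgs)
#   (training_args,) = parser.parse_args_into_dataclasses(["--learning_rate", "1e-5"])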
| 116 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))
@require_torch
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Any = torch.tensor(_A )
self.assertTrue(np.allclose(transpose(_A ),transpose(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor(_A )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),transpose(_A,axes=(1, 2, 0) ).numpy() ) )
    @require_tf
    def test_transpose_tf(self):
        """simple docstring"""
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))
    @require_flax
    def test_transpose_flax(self):
        """simple docstring"""
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))
    def test_reshape_numpy(self):
        """simple docstring"""
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))
    @require_torch
    def test_reshape_torch(self):
        """simple docstring"""
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))
    @require_tf
    def test_reshape_tf(self):
        """simple docstring"""
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))
    @require_flax
    def test_reshape_flax(self):
        """simple docstring"""
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))
    def test_squeeze_numpy(self):
        """simple docstring"""
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))
        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))
    @require_torch
    def test_squeeze_torch(self):
        """simple docstring"""
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))
    @require_tf
    def test_squeeze_tf(self):
        """simple docstring"""
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))
    @require_flax
    def test_squeeze_flax(self):
        """simple docstring"""
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))
        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))
    def test_expand_dims_numpy(self):
        """simple docstring"""
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))
    @require_torch
    def test_expand_dims_torch(self):
        """simple docstring"""
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))
    @require_tf
    def test_expand_dims_tf(self):
        """simple docstring"""
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))
    @require_flax
    def test_expand_dims_flax(self):
        """simple docstring"""
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
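# For reference, a minimal sketch of how a framework-agnostic helper like the
# `transpose` under test can dispatch on the input type. This is an illustrative
# assumption, not the library's actual implementation.
def transpose_sketch(array, axes=None):
    if isinstance(array, np.ndarray):
        return np.transpose(array, axes=axes)
    import torch  # deferred so numpy-only callers do not need torch installed
    if isinstance(array, torch.Tensor):
        # permute needs explicit axes; default to a full reversal like np.transpose
        return array.permute(*axes) if axes is not None else array.permute(*reversed(range(array.ndim)))
    raise ValueError(f"Unsupported input type: {type(array)}")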
| 18 | 0 |
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    """simple docstring"""
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]
    def _create_example_dataset(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)
    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])
    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)
    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns
    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))
    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
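# A standalone run of the API under test: Dataset.from_list infers the schema
# from the first record, as the uneven-records test above asserts. Illustrative
# only, not part of the test suite.
def _from_list_demo():
    records = [{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}]
    dset = Dataset.from_list(records)
    print(dset.column_names)  # ['col_1', 'col_2']
    print(dset[0])            # {'col_1': 3, 'col_2': 'a'}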
| 104 | import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/longformer-base-4096''': 40_96,
'''allenai/longformer-large-4096''': 40_96,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 40_96,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 40_96,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 40_96,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """simple docstring"""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        """simple docstring"""
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        """simple docstring"""
        return len(self.encoder)
    def get_vocab(self):
        """simple docstring"""
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        """simple docstring"""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """simple docstring"""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8"))  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        """simple docstring"""
        return self.decoder.get(index)
    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """simple docstring"""
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
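# A self-contained toy version of the greedy merge loop in `bpe` above:
# repeatedly merge the lowest-ranked adjacent pair until no ranked pair remains.
# The merge table below is illustrative, not Longformer's real merges.
def toy_bpe(word, ranks):
    symbols = list(word)
    while len(symbols) > 1:
        candidate_pairs = [(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)]
        best = min(candidate_pairs, key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:
            break
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == best:
                merged.append(symbols[i] + symbols[i + 1])
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = merged
    return symbols
# toy_bpe("lower", {("l", "o"): 0, ("lo", "w"): 1, ("e", "r"): 2}) -> ['low', 'er']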
| 18 | 0 |
deps = {
'''Pillow''': '''Pillow<10.0.0''',
'''accelerate''': '''accelerate>=0.20.3''',
'''av''': '''av==9.2.0''',
'''beautifulsoup4''': '''beautifulsoup4''',
'''black''': '''black~=23.1''',
'''codecarbon''': '''codecarbon==1.2.0''',
'''cookiecutter''': '''cookiecutter==1.7.3''',
'''dataclasses''': '''dataclasses''',
'''datasets''': '''datasets!=2.5.0''',
'''decord''': '''decord==0.6.0''',
'''deepspeed''': '''deepspeed>=0.9.3''',
'''diffusers''': '''diffusers''',
'''dill''': '''dill<0.3.5''',
'''evaluate''': '''evaluate>=0.2.0''',
'''fairscale''': '''fairscale>0.3''',
'''faiss-cpu''': '''faiss-cpu''',
'''fastapi''': '''fastapi''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1,<=0.7.0''',
'''ftfy''': '''ftfy''',
'''fugashi''': '''fugashi>=1.0''',
'''GitPython''': '''GitPython<3.1.19''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.14.1,<1.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''ipadic''': '''ipadic>=1.0.0,<2.0''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2,<=0.4.13''',
'''jaxlib''': '''jaxlib>=0.1.65,<=0.4.13''',
'''jieba''': '''jieba''',
'''kenlm''': '''kenlm''',
'''keras-nlp''': '''keras-nlp>=0.3.1''',
'''librosa''': '''librosa''',
'''nltk''': '''nltk''',
'''natten''': '''natten>=0.14.6''',
'''numpy''': '''numpy>=1.17''',
'''onnxconverter-common''': '''onnxconverter-common''',
'''onnxruntime-tools''': '''onnxruntime-tools>=1.4.2''',
'''onnxruntime''': '''onnxruntime>=1.4.0''',
'''opencv-python''': '''opencv-python''',
'''optuna''': '''optuna''',
'''optax''': '''optax>=0.0.8,<=0.1.4''',
'''packaging''': '''packaging>=20.0''',
'''parameterized''': '''parameterized''',
'''phonemizer''': '''phonemizer''',
'''protobuf''': '''protobuf''',
'''psutil''': '''psutil''',
'''pyyaml''': '''pyyaml>=5.1''',
'''pydantic''': '''pydantic<2''',
'''pytest''': '''pytest>=7.2.0''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''python''': '''python>=3.8.0''',
'''ray[tune]''': '''ray[tune]''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''rhoknp''': '''rhoknp>=1.1.0,<1.3.1''',
'''rjieba''': '''rjieba''',
'''rouge-score''': '''rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1''',
'''ruff''': '''ruff>=0.0.241,<=0.0.259''',
'''sacrebleu''': '''sacrebleu>=1.4.12,<2.0.0''',
'''sacremoses''': '''sacremoses''',
'''safetensors''': '''safetensors>=0.3.1''',
'''sagemaker''': '''sagemaker>=2.31.0''',
'''scikit-learn''': '''scikit-learn''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''sigopt''': '''sigopt''',
'''starlette''': '''starlette''',
'''sudachipy''': '''sudachipy>=0.6.6''',
'''sudachidict_core''': '''sudachidict_core>=20220729''',
'''tensorflow-cpu''': '''tensorflow-cpu>=2.6,<2.14''',
'''tensorflow''': '''tensorflow>=2.6,<2.14''',
'''tensorflow-text''': '''tensorflow-text<2.14''',
'''tf2onnx''': '''tf2onnx''',
'''timeout-decorator''': '''timeout-decorator''',
'''timm''': '''timm''',
'''tokenizers''': '''tokenizers>=0.11.1,!=0.11.3,<0.14''',
'''torch''': '''torch>=1.9,!=1.12.0''',
'''torchaudio''': '''torchaudio''',
'''torchvision''': '''torchvision''',
'''pyctcdecode''': '''pyctcdecode>=0.4.0''',
'''tqdm''': '''tqdm>=4.27''',
'''unidic''': '''unidic>=1.0.2''',
'''unidic_lite''': '''unidic_lite>=1.0.7''',
'''urllib3''': '''urllib3<2.0.0''',
'''uvicorn''': '''uvicorn''',
}
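# A table like this typically feeds a setup script, where bare package names are
# resolved to their pinned requirement strings. A minimal sketch of such a
# lookup; the helper name is an assumption, not necessarily the real one.
def deps_list(*pkgs):
    missing = [p for p in pkgs if p not in deps]
    if missing:
        raise ValueError(f"Unknown packages: {missing}")
    return [deps[p] for p in pkgs]
# deps_list("torch", "numpy") -> ['torch>=1.9,!=1.12.0', 'numpy>=1.17']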
| 186 | from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        """simple docstring"""
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True)
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = TFConvBertModel(config=config)
        dict_inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        list_inputs = [input_ids, input_mask]
        result = model(dict_inputs)
        result = model(list_inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_question_answering(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config, "use_cache"):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)
                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]
                self.assertEqual(len(outputs), num_out)
                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1)
                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size])
                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length])
    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length])
        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length])
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)
            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        """simple docstring"""
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1E-4)
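# The integration test above doubles as a usage recipe; a minimal inference
# sketch (the checkpoint downloads from the Hub on first use).
def _convbert_demo():
    model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
    input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
    hidden = model(input_ids)[0]  # last_hidden_state
    print(hidden.shape)           # (1, 6, 768)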
| 18 | 0 |
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    """simple docstring"""
    def __init__(self, img, dst_width, dst_height):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("""Destination width/height should be > 0""")
        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        self.output = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 2_5_5
        )
    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]
    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)
    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)
if __name__ == "__main__":
    dst_w, dst_h = 8_0_0, 6_0_0
    im = imread('image_data/lena.jpg', 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f"""Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}""", n.output
)
waitKey(0)
destroyAllWindows()
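# The per-pixel Python loop above costs O(dst_w * dst_h) interpreter steps; the
# same mapping can be done in one vectorized step with NumPy fancy indexing.
# Equivalent in output to NearestNeighbour.process() above.
def nearest_neighbour_resize(img, dst_w, dst_h):
    src_h, src_w = img.shape[:2]
    xs = (np.arange(dst_w) * (src_w / dst_w)).astype(int)  # column index map
    ys = (np.arange(dst_h) * (src_h / dst_h)).astype(int)  # row index map
    return img[ys[:, None], xs[None, :]]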
| 124 | def binary_recursive(decimal: int) -> str:
    """simple docstring"""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)
def main(number: str) -> str:
    """simple docstring"""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f'{negative}0b{binary_recursive(int(number))}'
if __name__ == "__main__":
from doctest import testmod
testmod()
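    # A quick check against Python's built-in bin(), which produces the same
    # 0b form (function names as restored above).
    for demo_n in ("0", "5", "-57", "1024"):
        assert main(demo_n) == bin(int(demo_n)), demo_n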
| 18 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
'''GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GraphormerForGraphClassification''',
'''GraphormerModel''',
'''GraphormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
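# The _LazyModule pattern defers the heavy `import torch` until an attribute is
# first accessed. A generic sketch of the same idea using module-level
# __getattr__ (PEP 562) -- shown here in comments so it does not interfere with
# the sys.modules replacement above; it is not transformers' actual code:
#
#     import importlib
#     _lazy_structure = {"math": ["sqrt"], "json": ["dumps"]}
#
#     def __getattr__(name):
#         for module_name, symbols in _lazy_structure.items():
#             if name in symbols:
#                 return getattr(importlib.import_module(module_name), name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")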
| 198 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 18 | 0 |
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    """simple docstring"""
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"
lowerCAmelCase_ : List[Any] = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, 'README.md')
    print(f"Generating {path}")
    with open(path, 'w', encoding='utf-8') as f:
        f.write(lowerCAmelCase_)  # the model-card text assembled on the line above
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / 'model_cards'
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / 'facebook' / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 224 | import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def rename_keys(s_dict):
    """simple docstring"""
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = R".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(R"layers_(\d+)", R"block/\1/layer", new_key)
        layer_to_block_of_layer = R"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(R"/mlp/", R"/1/mlp/", new_key)
                new_key = re.sub(R"/pre_mlp_layer_norm/", R"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(R"/mlp/", R"/2/mlp/", new_key)
                new_key = re.sub(R"/pre_mlp_layer_norm/", R"/2/layer_norm/", new_key)
        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)
        print(f'{key} -> {new_key}')
        s_dict[new_key] = s_dict.pop(key)
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                new_key = key.replace("expert/", f"experts/expert_{idx}/")
                s_dict[new_key] = expert_weights[idx]
                print(f'{key} -> {new_key}')
            s_dict.pop(key)
    return s_dict
GIN_TO_CONFIG_MAPPING = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def convert_gin_to_config(gin_file, num_experts):
    """simple docstring"""
    import regex as re
    with open(gin_file, "r") as f:
        raw_gin = f.read()
    regex_match = re.findall(R"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)
    activation = re.findall(R"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])
    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8):
    """simple docstring"""
    print(f'Loading flax weights from : {flax_checkpoint_path}')
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)
    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)
    pt_model = SwitchTransformersForConditionalGeneration(config)
    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)
    print(f'Save PyTorch model to {pytorch_dump_path}')
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
    args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
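    # The heart of rename_keys is a pair of regex rewrites; a standalone
    # demonstration on a toy checkpoint key (the key itself is illustrative).
    demo_key = "encoder/layers_3/mlp/wi/kernel"
    demo_new = re.sub(R"layers_(\d+)", R"block/\1/layer", demo_key)
    if re.match(R"(encoder|decoder)\/", demo_new).groups()[0] == "encoder":
        demo_new = re.sub(R"/mlp/", R"/1/mlp/", demo_new)
    assert demo_new == "encoder/block/3/layer/1/mlp/wi/kernel"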
| 18 | 0 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class FFT:
    '''simple docstring'''
    def __init__(self, poly_a=None, poly_b=None):
        # Input as lists of coefficients
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]
        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)
        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)
        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))
        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)
        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))
        # The product
        self.product = self.__multiply()
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol
            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]
    def __multiply(self):
        dft_a = self.__dft("""A""")
        dft_b = self.__dft("""B""")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b
        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2)
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root))
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove trailing 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c
    def __str__(self):
        a = "A = " + " + ".join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.product))
        return f'{a}\n{b}\n{c}'
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
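    # Polynomial multiplication is coefficient convolution, so the FFT result
    # can be sanity-checked against numpy.convolve (class name FFT as restored above).
    poly_product = FFT([1, 2, 3], [4, 5]).product  # (1 + 2x + 3x^2) * (4 + 5x)
    assert np.allclose([term.real for term in poly_product], np.convolve([1, 2, 3], [4, 5]))
    # np.convolve([1, 2, 3], [4, 5]) -> [ 4 13 22 15]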
| 58 | from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 1_8, rounded_values_count: int = 1_0) -> float:
    """simple docstring"""
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
if __name__ == "__main__":
__import__('''doctest''').testmod()
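    # A quick numerical check of the truncated Maclaurin series against
    # math.sin (function name `sin` as restored above).
    from math import sin as _math_sin
    for _deg in (0, 30, 90, -45):
        assert abs(sin(_deg) - _math_sin(radians(_deg))) < 1e-9, _deg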
| 18 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''roberta-base''': 5_12,
'''roberta-large''': 5_12,
'''roberta-large-mnli''': 5_12,
'''distilroberta-base''': 5_12,
'''roberta-base-openai-detector''': 5_12,
'''roberta-large-openai-detector''': 5_12,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 348 | from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    """Return num! for a non-negative integer, memoized with lru_cache."""
if num < 0:
raise ValueError("Number should not be negative." )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
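# A brief usage sketch (illustrative): thanks to @lru_cache, a repeated call
# is answered from the cache instead of recursing again.
if __name__ == "__main__":
    print(factorial(5))  # 120
    print(factorial(5))  # served from the cache
    print(factorial.cache_info())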
| 18 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json''',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
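# A minimal usage sketch (hedged): instantiate a default config and override a
# retrieval hyperparameter; attribute names follow the assignments above.
#
#     config = RealmConfig(searcher_beam_size=1000)
#     print(config.hidden_size, config.searcher_beam_size)  # 768 1000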
| 226 | from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at ``start``; record even-sized cuts."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    """Run the DFS from the root (vertex 1) to collect all even-sized subtrees."""
    dfs(1)
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
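    # For the edge list above the expected output is 2: removing the edges
    # (1, 3) and (1, 6) leaves three components of even size (2, 4 and 4).
    assert len(cuts) - 1 == 2  # illustrative sanity check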
| 18 | 0 |
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def a__ ( snake_case , snake_case="train" ):
"""simple docstring"""
return calculate_hypothesis_value(snake_case , snake_case ) - output(
snake_case , snake_case )
def a__ ( snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = 0
for i in range(len(snake_case ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def a__ ( snake_case , snake_case ):
"""simple docstring"""
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def a__ ( snake_case , snake_case ):
"""simple docstring"""
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def a__ ( snake_case , snake_case=m ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = 0
for i in range(snake_case ):
if index == -1:
summation_value += _error(snake_case )
else:
summation_value += _error(snake_case ) * train_data[i][0][index]
return summation_value
def a__ ( snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = summation_of_cost_derivative(snake_case , snake_case ) / m
return cost_derivative_value
def a__ ( ):
"""simple docstring"""
global parameter_vector
# Tune these values to set a tolerance value for predicted output
__SCREAMING_SNAKE_CASE : List[str] = 0.00_0002
__SCREAMING_SNAKE_CASE : Any = 0
__SCREAMING_SNAKE_CASE : str = 0
while True:
j += 1
__SCREAMING_SNAKE_CASE : Optional[Any] = [0, 0, 0, 0]
for i in range(0 , len(snake_case ) ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = get_cost_derivative(i - 1 )
__SCREAMING_SNAKE_CASE : Optional[int] = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
snake_case , snake_case , atol=snake_case , rtol=snake_case , ):
break
__SCREAMING_SNAKE_CASE : Union[str, Any] = temp_parameter_vector
print(('''Number of iterations:''', j) )
def a__ ( ):
"""simple docstring"""
for i in range(len(snake_case ) ):
print(('''Actual output value:''', output(snake_case , '''test''' )) )
print(('''Hypothesis output:''', calculate_hypothesis_value(snake_case , '''test''' )) )
if __name__ == "__main__":
run_gradient_descent()
print("""\nTesting gradient descent for a linear hypothesis function.\n""")
test_gradient_descent()
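    # Illustrative check: after convergence the hypothesis should sit close to
    # the training labels, e.g. example 0 maps (5, 2, 3) to roughly 15.
    print(("Train example 0:", calculate_hypothesis_value(0, "train"), "vs", output(0, "train")))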
| 303 | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
| 18 | 0 |
"""simple docstring"""
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    """Intensity transmitted by an ideal polarizer (Malus's law)."""
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError('''The value of intensity cannot be negative''')
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''')
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='''malus_law''')
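    # Illustrative values: an ideal polarizer passes the full intensity at
    # 0 degrees, half of it at 45 degrees, and (ideally) none at 90 degrees.
    print(malus_law(100.0, 0))   # 100.0
    print(malus_law(100.0, 45))  # ~50.0
    print(malus_law(100.0, 90))  # ~0.0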
| 72 | import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCamelCase : Dict = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__lowerCamelCase : int = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
__lowerCamelCase : Any = {
'''junnyu/roformer_chinese_small''': 15_36,
'''junnyu/roformer_chinese_base''': 15_36,
'''junnyu/roformer_chinese_char_small''': 5_12,
'''junnyu/roformer_chinese_char_base''': 5_12,
'''junnyu/roformer_small_discriminator''': 1_28,
'''junnyu/roformer_small_generator''': 1_28,
}
__lowerCamelCase : Union[str, Any] = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        # The custom Jieba pre-tokenizer cannot be pickled; swap in a picklable one
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        # Restore a picklable pre-tokenizer before serializing to disk
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
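# A hedged usage sketch (requires Hub access and the `rjieba` package backing
# JiebaPreTokenizer; the checkpoint name is one of the vocab maps above):
#
#     tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#     print(tokenizer.tokenize("今天天气非常好。"))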
| 18 | 0 |
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.get_maximum_flow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.vertices_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override this method
    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def get_maximum_flow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.vertices_count for i in range(self.vertices_count)]

        self.heights = [0] * self.vertices_count
        self.excesses = [0] * self.vertices_count

    def _algorithm(self):
        self.heights[self.source_index] = self.vertices_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.vertices_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.vertices_count):
                # if it's a neighbour and the current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.vertices_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:int = [0]
SCREAMING_SNAKE_CASE_:str = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
SCREAMING_SNAKE_CASE_:Any = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
SCREAMING_SNAKE_CASE_:List[str] = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
SCREAMING_SNAKE_CASE_:str = flow_network.find_maximum_flow()
print(F"""maximum flow is {maximum_flow}""")
| 116 | import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class a__ ( A__ ):
def __init__( self : Tuple,_A : Optional[int],_A : Any=13,_A : List[str]=7,_A : int=True,_A : Dict=True,_A : Dict=False,_A : List[Any]=True,_A : Any=99,_A : Optional[int]=32,_A : Any=5,_A : List[Any]=4,_A : Dict=64,_A : Optional[Any]="gelu",_A : Tuple=0.1,_A : Any=0.1,_A : List[Any]=512,_A : Dict=16,_A : Optional[Any]=2,_A : Union[str, Any]=0.02,_A : List[str]=3,_A : Optional[Any]=4,_A : Union[str, Any]=None,_A : Tuple=2,_A : List[str]=2,_A : str=2,_A : Dict=2,_A : Optional[Any]=4,_A : Union[str, Any]=1,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : Optional[int] = batch_size
SCREAMING_SNAKE_CASE_ : Dict = seq_length
SCREAMING_SNAKE_CASE_ : Dict = is_training
SCREAMING_SNAKE_CASE_ : Optional[int] = use_input_mask
SCREAMING_SNAKE_CASE_ : int = use_token_type_ids
SCREAMING_SNAKE_CASE_ : Optional[int] = use_labels
SCREAMING_SNAKE_CASE_ : Tuple = vocab_size
SCREAMING_SNAKE_CASE_ : Any = hidden_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE_ : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : List[str] = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : str = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = type_vocab_size
SCREAMING_SNAKE_CASE_ : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE_ : Tuple = num_labels
SCREAMING_SNAKE_CASE_ : List[Any] = num_choices
SCREAMING_SNAKE_CASE_ : Dict = scope
SCREAMING_SNAKE_CASE_ : int = q_groups
SCREAMING_SNAKE_CASE_ : Tuple = k_groups
SCREAMING_SNAKE_CASE_ : List[Any] = v_groups
SCREAMING_SNAKE_CASE_ : Tuple = post_attention_groups
SCREAMING_SNAKE_CASE_ : int = intermediate_groups
SCREAMING_SNAKE_CASE_ : List[Any] = output_groups
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
SCREAMING_SNAKE_CASE_ : List[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size],self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size],self.num_choices )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : str ):
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size,vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,attention_probs_dropout_prob=self.hidden_dropout_prob,attention_dropout=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,q_groups=self.q_groups,k_groups=self.k_groups,v_groups=self.v_groups,post_attention_groups=self.post_attention_groups,intermediate_groups=self.intermediate_groups,output_groups=self.output_groups,)
def __UpperCamelCase ( self : Tuple,_A : Union[str, Any],_A : Union[str, Any],_A : int,_A : Optional[int],_A : Union[str, Any],_A : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = SqueezeBertModel(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Any = model(_A,_A )
SCREAMING_SNAKE_CASE_ : List[str] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Dict,_A : Any,_A : Tuple,_A : str,_A : Any,_A : Union[str, Any],_A : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = SqueezeBertForMaskedLM(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : List[str] = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Optional[int],_A : Union[str, Any],_A : Union[str, Any],_A : Any,_A : Tuple,_A : int,_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = SqueezeBertForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(
_A,attention_mask=_A,start_positions=_A,end_positions=_A )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : List[Any],_A : List[str],_A : Tuple,_A : List[Any],_A : List[str],_A : List[str],_A : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.num_labels
SCREAMING_SNAKE_CASE_ : List[str] = SqueezeBertForSequenceClassification(_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : str,_A : Optional[int],_A : str,_A : List[Any],_A : List[str],_A : str,_A : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Optional[int] = SqueezeBertForTokenClassification(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[int] = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : List[Any],_A : Tuple,_A : str,_A : Optional[Any],_A : int,_A : str,_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.num_choices
SCREAMING_SNAKE_CASE_ : Union[str, Any] = SqueezeBertForMultipleChoice(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = input_ids.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : str = input_mask.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : Optional[int] = model(
_A,attention_mask=_A,labels=_A,)
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)) : Dict = config_and_inputs
SCREAMING_SNAKE_CASE_ : Dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class a__ ( A__ , A__ , unittest.TestCase ):
A = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
A = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A = False
A = True
A = False
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = SqueezeBertModelTester(self )
SCREAMING_SNAKE_CASE_ : List[str] = ConfigTester(self,config_class=_A,dim=37 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*_A )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*_A )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*_A )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_A )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*_A )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_A )
@slow
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : Tuple = SqueezeBertModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_sentencepiece
@require_tokenizers
@require_torch
class a__ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]] )
SCREAMING_SNAKE_CASE_ : List[Any] = model(_A )[0]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.Size((1, 3) )
self.assertEqual(output.shape,_A )
SCREAMING_SNAKE_CASE_ : int = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(_A,_A,atol=1E-4 ) )
| 18 | 0 |
'''simple docstring'''
def heaps(arr: list) -> list:
    """Return all permutations of ``arr`` using Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res
return res
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    arr = [int(item) for item in user_input.split(''',''')]
    print(heaps(arr))
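# Illustrative, non-interactive example of the output order produced by
# Heap's algorithm for a three-element list:
#
#     >>> heaps([1, 2, 3])
#     [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]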
| 104 | import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[Any] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : List[str] = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = TextDatasetReader(lowerCAmelCase , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , split=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
if issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = text_path
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [text_path]
SCREAMING_SNAKE_CASE_ : int = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[int] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
def _snake_case ( lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : List[str]=("train",) ):
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase )
for split in splits:
SCREAMING_SNAKE_CASE_ : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[Any] = TextDatasetReader({"train": text_path} , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = tmp_path / "cache"
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
SCREAMING_SNAKE_CASE_ : Tuple = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : Dict = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader({"train": text_path} , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : Dict ):
"""simple docstring"""
if split:
SCREAMING_SNAKE_CASE_ : Optional[int] = {split: text_path}
else:
SCREAMING_SNAKE_CASE_ : List[Any] = "train"
SCREAMING_SNAKE_CASE_ : Tuple = {"train": text_path, "test": text_path}
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 18 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self )->Dict:
'''simple docstring'''
A_ : Optional[Any] = tempfile.mkdtemp()
A_ : Optional[int] = BlipImageProcessor()
A_ : Tuple = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-BertModel''' )
A_ : Union[str, Any] = BlipProcessor(_A , _A )
processor.save_pretrained(self.tmpdirname )
def _snake_case ( self , **_SCREAMING_SNAKE_CASE )->Any:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_A ).tokenizer
def _snake_case ( self , **_SCREAMING_SNAKE_CASE )->Dict:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_A ).image_processor
def _snake_case ( self )->Any:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
A_ : Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A_ : List[Any] = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
A_ : Dict = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ : Union[str, Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
A_ : Dict = self.get_image_processor(do_normalize=_A , padding_value=1.0 )
A_ : Union[str, Any] = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def _snake_case ( self )->Union[str, Any]:
'''simple docstring'''
A_ : Union[str, Any] = self.get_image_processor()
A_ : List[str] = self.get_tokenizer()
A_ : List[Any] = BlipProcessor(tokenizer=_A , image_processor=_A )
A_ : Optional[int] = self.prepare_image_inputs()
A_ : Union[str, Any] = image_processor(_A , return_tensors='''np''' )
A_ : Tuple = processor(images=_A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
A_ : Tuple = self.get_image_processor()
A_ : str = self.get_tokenizer()
A_ : List[str] = BlipProcessor(tokenizer=_A , image_processor=_A )
A_ : List[str] = "lower newer"
A_ : int = processor(text=_A )
A_ : List[Any] = tokenizer(_A , return_token_type_ids=_A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _snake_case ( self )->int:
'''simple docstring'''
A_ : Optional[Any] = self.get_image_processor()
A_ : str = self.get_tokenizer()
A_ : str = BlipProcessor(tokenizer=_A , image_processor=_A )
A_ : Tuple = "lower newer"
A_ : Optional[int] = self.prepare_image_inputs()
A_ : Any = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def _snake_case ( self )->int:
'''simple docstring'''
A_ : Dict = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : str = BlipProcessor(tokenizer=_A , image_processor=_A )
A_ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : List[str] = processor.batch_decode(_A )
A_ : Tuple = tokenizer.batch_decode(_A )
self.assertListEqual(_A , _A )
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
A_ : Any = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : int = BlipProcessor(tokenizer=_A , image_processor=_A )
A_ : str = "lower newer"
A_ : Tuple = self.prepare_image_inputs()
A_ : Optional[int] = processor(text=_A , images=_A )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
| 186 | import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    """Return True when every tensor in the list has the same shape."""
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class a__ ( A__ , A__ , A__ , unittest.TestCase ):
A = StableDiffusionLatentUpscalePipeline
A = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'height',
'width',
'cross_attention_kwargs',
'negative_prompt_embeds',
'prompt_embeds',
}
A = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
A = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A = frozenset([] )
A = True
@property
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = 1
SCREAMING_SNAKE_CASE_ : Optional[int] = 4
SCREAMING_SNAKE_CASE_ : Optional[int] = (16, 16)
SCREAMING_SNAKE_CASE_ : Dict = floats_tensor((batch_size, num_channels) + sizes,rng=random.Random(0 ) ).to(_A )
return image
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : str = UNetaDConditionModel(
act_fn="gelu",attention_head_dim=8,norm_num_groups=_A,block_out_channels=[32, 32, 64, 64],time_cond_proj_dim=160,conv_in_kernel=1,conv_out_kernel=1,cross_attention_dim=32,down_block_types=(
"KDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
),in_channels=8,mid_block_type=_A,only_cross_attention=_A,out_channels=5,resnet_time_scale_shift="scale_shift",time_embedding_type="fourier",timestep_post_act="gelu",up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 32, 64, 64],in_channels=3,out_channels=3,down_block_types=[
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
],up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],latent_channels=4,)
SCREAMING_SNAKE_CASE_ : int = EulerDiscreteScheduler(prediction_type="sample" )
SCREAMING_SNAKE_CASE_ : List[Any] = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,hidden_act="quick_gelu",projection_dim=512,)
SCREAMING_SNAKE_CASE_ : Tuple = CLIPTextModel(_A )
SCREAMING_SNAKE_CASE_ : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"unet": model.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def __UpperCamelCase ( self : List[Any],_A : int,_A : Tuple=0 ):
"""simple docstring"""
if str(_A ).startswith("mps" ):
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.manual_seed(_A )
else:
SCREAMING_SNAKE_CASE_ : Dict = torch.Generator(device=_A ).manual_seed(_A )
SCREAMING_SNAKE_CASE_ : Tuple = {
"prompt": "A painting of a squirrel eating a burger",
"image": self.dummy_image.cpu(),
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = "cpu"
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : List[str] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_inputs(_A )
SCREAMING_SNAKE_CASE_ : Dict = pipe(**_A ).images
SCREAMING_SNAKE_CASE_ : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape,(1, 256, 256, 3) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array(
[0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
SCREAMING_SNAKE_CASE_ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A,1E-3 )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
super().test_save_load_local(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue

            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(33 )
SCREAMING_SNAKE_CASE_ : Optional[int] = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4",torch_dtype=torch.floataa )
pipe.to("cuda" )
SCREAMING_SNAKE_CASE_ : List[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler",torch_dtype=torch.floataa )
upscaler.to("cuda" )
SCREAMING_SNAKE_CASE_ : Tuple = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
SCREAMING_SNAKE_CASE_ : str = pipe(_A,generator=_A,output_type="latent" ).images
SCREAMING_SNAKE_CASE_ : Optional[Any] = upscaler(
prompt=_A,image=_A,num_inference_steps=20,guidance_scale=0,generator=_A,output_type="np",).images[0]
SCREAMING_SNAKE_CASE_ : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" )
assert np.abs((expected_image - image).mean() ) < 5E-2
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(33 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler",torch_dtype=torch.floataa )
upscaler.to("cuda" )
SCREAMING_SNAKE_CASE_ : Any = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
SCREAMING_SNAKE_CASE_ : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" )
SCREAMING_SNAKE_CASE_ : str = upscaler(
prompt=_A,image=_A,num_inference_steps=20,guidance_scale=0,generator=_A,output_type="np",).images[0]
SCREAMING_SNAKE_CASE_ : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" )
assert np.abs((expected_image - image).max() ) < 5E-2
| 18 | 0 |
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """CLIPTokenizer that can expand one placeholder token into several learned vectors."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
                """ `placeholder_token` that is not already in the tokenizer.""")

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"""_{i}"""
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"""The tokenizer already has placeholder token {token} that can get confused with"""
                    f""" {placeholder_token}; keep placeholder tokens independent""")
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(
                    self.replace_placeholder_tokens_in_text(
                        text[i], vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, """ """.join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load), *args, **kwargs)

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load), *args, **kwargs)
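# A hedged usage sketch: adding a two-vector placeholder token and encoding a
# prompt that uses it (requires a CLIP checkpoint download; names illustrative).
#
#     tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
#     tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=2)
#     ids = tokenizer.encode("a photo of <cat-toy>")  # "<cat-toy>" expands to "<cat-toy>_0 <cat-toy>_1"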
| 124 | from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig( PretrainedConfig ):
    model_type = 'perceiver'

    def __init__( self , num_latents=256 , d_latents=1280 , d_model=768 , num_blocks=1 , num_self_attends_per_block=26 , num_self_attention_heads=8 , num_cross_attention_heads=8 , qk_channels=None , v_channels=None , cross_attention_shape_for_attention="kv" , self_attention_widening_factor=1 , cross_attention_widening_factor=1 , hidden_act="gelu" , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , layer_norm_eps=1E-12 , use_query_residual=True , vocab_size=262 , max_position_embeddings=2048 , image_size=56 , train_size=[368, 496] , num_frames=16 , audio_samples_per_frame=1920 , samples_per_patch=16 , output_shape=[1, 16, 224, 224] , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig( OnnxConfig ):
    @property
    def inputs( self ):
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )

    @property
    def atol_for_validation( self ):
        """simple docstring"""
        return 1E-4

    def generate_dummy_inputs( self , preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , batch_size: int = -1 , seq_length: int = -1 , num_choices: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , num_channels: int = 3 , image_width: int = 40 , image_height: int = 40 , ):
        """simple docstring"""
        if isinstance(preprocessor , PreTrainedTokenizerBase ):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair )
            seq_length = compute_effective_axis_dimension(
                seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"] ) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input , return_tensors=framework ) )
            inputs["inputs"] = inputs.pop("input_ids" )
            return inputs
        elif isinstance(preprocessor , FeatureExtractionMixin ) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size , fixed_dimension=OnnxConfig.default_fixed_batch )
            dummy_input = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
            inputs = dict(preprocessor(images=dummy_input , return_tensors=framework ) )
            inputs["inputs"] = inputs.pop("pixel_values" )
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
| 18 | 0 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def __UpperCamelCase ( ):
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching , '''os.path.join''' , mock ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def __UpperCamelCase ( ):
assert _test_patching.open is open
    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching , '''open''' , mock ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def __UpperCamelCase ( ):
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching , '''pandas.read_csv''' , mock ):
pass
def __UpperCamelCase ( ):
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching , '''len''' , None ) is None
    with patch_submodule(_test_patching , '''len''' , mock ):
        assert _test_patching.len is mock
assert _test_patching.len is len
def __UpperCamelCase ( ):
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching , '''open''' , mock )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def __UpperCamelCase ( ):
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching , '''os.path.join''' , mock_join ):
        with patch_submodule(_test_patching , '''os.rename''' , mock_rename ):
            with patch_submodule(_test_patching , '''os.path.dirname''' , mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching , '''os.rename''' , mock_rename ):
        with patch_submodule(_test_patching , '''os.path.join''' , mock_join ):
            with patch_submodule(_test_patching , '''os.path.dirname''' , mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def __UpperCamelCase ( ):
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , mock ):
        pass
    with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , mock ):
pass
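

def test_patch_submodule_minimal_illustration():
    # Added illustration (not in the original suite): patch_submodule swaps an
    # attribute deep inside an imported module only for the duration of the
    # context manager.
    from os.path import join as original_join

    mock = "__test_patch_submodule_minimal_mock__"
    with patch_submodule(_test_patching , '''os.path.join''' , mock ):
        assert _test_patching.os.path.join is mock
    assert _test_patching.os.path.join is original_join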
| 198 | from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader( yaml.SafeLoader ):
    def _check_no_duplicates_on_constructed_node( self , node ):
        """simple docstring"""
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key ) if isinstance(key , list ) else key for key in keys]
        counter = Counter(keys )
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(F'Got duplicate yaml keys: {duplicate_keys}' )

    def construct_mapping( self , node , deep=False ):
        """simple docstring"""
        mapping = super().construct_mapping(node , deep=deep )
        self._check_no_duplicates_on_constructed_node(node )
        return mapping


def _split_yaml_from_readme( readme_content : str ):
    """simple docstring"""
    full_content = list(readme_content.splitlines() )
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---" ) + 1
        yamlblock = "\n".join(full_content[1:sep_idx] )
        return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
    return None, "\n".join(full_content )


class DatasetMetadata( dict ):
    # class attributes
    _FIELDS_WITH_DASHES = {'train_eval_index'}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme( cls , path : Path ):
        """simple docstring"""
        with open(path , encoding="utf-8" ) as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read() )
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string )
        else:
            return cls()

    def to_readme( self , path : Path ):
        """simple docstring"""
        if path.exists():
            with open(path , encoding="utf-8" ) as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content )
        with open(path , "w" , encoding="utf-8" ) as readme_file:
            readme_file.write(updated_readme_content )

    def _to_readme( self , readme_content : Optional[str] = None ):
        """simple docstring"""
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content )
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string( cls , string : str ):
        """simple docstring"""
        metadata_dict = yaml.load(string , Loader=_NoDuplicateSafeLoader ) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-" , "_" ) if key.replace("-" , "_" ) in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict )

    def to_yaml_string( self ):
        """simple docstring"""
        return yaml.safe_dump(
            {
                (key.replace("_" , "-" ) if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            } , sort_keys=False , allow_unicode=True , encoding="utf-8" , ).decode("utf-8" )
__lowerCamelCase : List[Any] = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
    ap.add_argument('''readme_filepath''')
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
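    # Added illustration: for a README that starts with
    #   ---
    #   pretty_name: Demo
    #   ---
    # from_readme() parses the YAML block into a DatasetMetadata mapping and
    # to_readme() writes the (possibly updated) block back above the Markdown body.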
| 18 | 0 |
"""simple docstring"""
from collections import defaultdict
def dfs( start: int ) -> int:
    """Return the size of the subtree rooted at ``start``, recording even-sized cuts."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v )
    if ret % 2 == 0:
        cuts.append(start )
    return ret
def even_tree() -> None:
    """Start the traversal from the root of the tree (node 1)."""
dfs(1 )
if __name__ == "__main__":
    n, m = 1_0, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (1_0, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
| 224 | from __future__ import annotations
from math import pi, sqrt
def resonant_frequency( inductance : float , capacitance : float ) -> tuple[str, float]:
"""simple docstring"""
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
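    # Added example: a 10 mH inductor with a 5 uF capacitor resonates near 712 Hz.
    print(resonant_frequency(inductance=10e-3, capacitance=5e-6))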
| 18 | 0 |
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher( function , args=() , num_processes=None , mixed_precision="no" , use_port="29500" ):
    in_colab = False
    in_kaggle = False
    if any(key.startswith("""KAGGLE""" ) for key in os.environ.keys() ):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["""IPython"""].get_ipython() )
    try:
        mixed_precision = PrecisionType(mixed_precision.lower() )
    except ValueError:
        raise ValueError(
            F'Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.' )
    if (in_colab or in_kaggle) and (os.environ.get("""TPU_NAME""" , None ) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state ) > 0:
            raise ValueError(
                """To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside """
                """your training function. Restart your notebook and make sure no cells initializes an """
                """`Accelerator`.""" )
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function , distributed_type="""TPU""" )
        print(F'Launching a training on {num_processes} TPU cores.' )
        xmp.spawn(launcher , args=args , nprocs=num_processes , start_method="""fork""" )
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("""Launching training on one GPU.""" )
        else:
            print("""Launching training on one CPU.""" )
        function(*args )
    else:
        if num_processes is None:
            raise ValueError(
                """You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.""" )
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state ) > 0:
                raise ValueError(
                    """To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized """
                    """inside your training function. Restart your notebook and make sure no cells initializes an """
                    """`Accelerator`.""" )
            if torch.cuda.is_initialized():
                raise ValueError(
                    """To launch a multi-GPU training from your notebook, you need to avoid running any instruction """
                    """using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA """
                    """function.""" )
            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes , master_addr="""127.0.01""" , master_port=use_port , mixed_precision=mixed_precision ):
                launcher = PrepareForLaunch(function , distributed_type="""MULTI_GPU""" )
                print(F'Launching training on {num_processes} GPUs.' )
                try:
                    start_processes(launcher , args=args , nprocs=num_processes , start_method="""fork""" )
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            """CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. """
                            """This likely stems from an outside import causing issues once the `notebook_launcher()` is called. """
                            """Please review your imports and test them when running the `notebook_launcher()` to identify """
                            """which one is problematic.""" ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["""PYTORCH_ENABLE_MPS_FALLBACK"""] = "1"
                print("""Launching training on MPS.""" )
            elif torch.cuda.is_available():
                print("""Launching training on one GPU.""" )
            else:
                print("""Launching training on CPU.""" )
            function(*args )


def debug_launcher( function , args=() , num_processes=2 ):
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes , master_addr="""127.0.01""" , master_port="""29500""" , accelerate_mixed_precision="""no""" , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu="""yes""" , ):
            launcher = PrepareForLaunch(function , debug=True )
            start_processes(launcher , args=args , nprocs=num_processes , start_method="""fork""" )
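

# Added usage sketch: in a notebook one defines a training function and hands it
# to the launcher; the CPU-only debug variant spawns two processes, e.g.
#
#   def training_loop(mixed_precision="no"):
#       ...  # build Accelerator() and train here
#
#   notebook_launcher(training_loop, num_processes=2)
#   debug_launcher(training_loop)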
def binary_insertion_sort( collection : list ):
    """simple docstring"""
    n = len(collection )
    for i in range(1 , n ):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i , low , -1 ):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
__lowerCamelCase : Dict = input('''Enter numbers separated by a comma:\n''').strip()
__lowerCamelCase : List[str] = [int(item) for item in user_input.split(''',''')]
print(binary_insertion_sort(unsorted))
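    # Added sanity check: binary search only finds the insertion point, so the
    # sort is still O(n^2) worst case from shifting, but uses O(n log n) comparisons.
    assert binary_insertion_sort([5, 2, 4, 1, 3]) == [1, 2, 3, 4, 5]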
| 18 | 0 |
values = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',
14: '''e''',
15: '''f''',
}
def decimal_to_hexadecimal( decimal )-> str:
    '''simple docstring'''
    assert type(decimal ) in (int, float) and decimal == int(decimal )
    decimal = int(decimal )
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal , 16 )
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
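    # Added examples: agrees with the built-in hex() for nonzero integers
    # (0 is an edge case here: the loop never runs, so the result is just "0x").
    assert decimal_to_hexadecimal(26) == hex(26)
    assert decimal_to_hexadecimal(-256) == hex(-256)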
| 348 | from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__( self , start , end , val , left=None , right=None ):
        """simple docstring"""
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__( self ):
        """simple docstring"""
        return F'SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'


class SegmentTree:
    def __init__( self , collection : Sequence , function ):
        """simple docstring"""
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0 , len(collection ) - 1 )

    def update( self , i , val ):
        """simple docstring"""
        self._update_tree(self.root , i , val )

    def query_range( self , i , j ):
        """simple docstring"""
        return self._query_range(self.root , i , j )

    def _build_tree( self , start , end ):
        """simple docstring"""
        if start == end:
            return SegmentTreeNode(start , end , self.collection[start] )
        mid = (start + end) // 2
        left = self._build_tree(start , mid )
        right = self._build_tree(mid + 1 , end )
        return SegmentTreeNode(start , end , self.fn(left.val , right.val ) , left , right )

    def _update_tree( self , node , i , val ):
        """simple docstring"""
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left , i , val )
        else:
            self._update_tree(node.right , i , val )
        node.val = self.fn(node.left.val , node.right.val )

    def _query_range( self , node , i , j ):
        """simple docstring"""
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left , i , j )
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left , i , node.mid ) , self._query_range(node.right , node.mid + 1 , j ) , )
        else:
            # range in right child tree
            return self._query_range(node.right , i , j )

    def traverse( self ):
        """simple docstring"""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root )
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left )
                if node.right is not None:
                    queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
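        # With operator.add the three queries print 7, 5 and 13; with max they
        # print 4, 5 and 5, and with min 3, 5 and 3, because update(1, 5)
        # turned the array into [2, 5, 5, 3, 4].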
| 18 | 0 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest( TokenizerTesterMixin ,unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def _large_tokenizer( self ):
        '''simple docstring'''
        return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )

    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts( self , tokenizer ):
        '''simple docstring'''
        return ("This is a test", "This is a test")
    def test_convert_token_and_id( self ):
        '''simple docstring'''
        token = "</s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<pad>''' )
        self.assertEqual(vocab_keys[1] , '''</s>''' )
        self.assertEqual(vocab_keys[-1] , '''v''' )
        self.assertEqual(len(vocab_keys ) , 11_03 )
    def test_vocab_size( self ):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size , 11_03 )
    def test_mask_tokens_rust_pegasus( self ):
        '''simple docstring'''
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(py_ids , rust_ids )
    def test_large_mask_tokens( self ):
        '''simple docstring'''
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result , ids )
    def test_large_tokenizer_settings( self ):
        '''simple docstring'''
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 9_61_03
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 1_03
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 10_24
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result , ids )
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
    @require_torch
    def test_large_seq2seq_truncation( self ):
        '''simple docstring'''
        src_texts = ["This is going to be way too long." * 1_50, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors='''pt''' )
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors='''pt''' )
        assert batch.input_ids.shape == (2, 10_24)
        assert batch.attention_mask.shape == (2, 10_24)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.
    @slow
    def test_tokenizer_integration( self ):
        '''simple docstring'''
__UpperCAmelCase : Tuple = {"input_ids": [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__UpperCAmelCase , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest( TokenizerTesterMixin ,unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB , offset=0 , mask_token_sent=None , mask_token='''[MASK]''' )
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def _large_tokenizer( self ):
        '''simple docstring'''
        return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )

    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts( self , tokenizer ):
        '''simple docstring'''
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus( self ):
        '''simple docstring'''
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(py_ids , rust_ids )
    @require_torch
    def test_large_seq2seq_truncation( self ):
        '''simple docstring'''
        src_texts = ["This is going to be way too long." * 10_00, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors='''pt''' )
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors='''pt''' )
        assert batch.input_ids.shape == (2, 40_96)
        assert batch.attention_mask.shape == (2, 40_96)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.
    def test_equivalence_to_orig_tokenizer( self ):
        '''simple docstring'''
        raw_input_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(raw_input_str ).input_ids
        self.assertListEqual(
            token_ids , [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1] , )
def euclidean_gcd( a : int , b : int ):
    """simple docstring"""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive( a : int , b : int ):
    """simple docstring"""
    return a if b == 0 else euclidean_gcd_recursive(b , a % b )
def main():
"""simple docstring"""
print(f'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' )
print(f'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' )
print(f'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' )
print(f'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' )
print(f'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' )
print(f'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' )
print(f'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' )
print(f'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' )
if __name__ == "__main__":
main()
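

def extended_euclidean_gcd(a: int, b: int) -> tuple[int, int, int]:
    """Added companion sketch (not in the original snippet): return ``(g, x, y)``
    with ``a * x + b * y == g == gcd(a, b)`` (the Bezout coefficients)."""
    if b == 0:
        return a, 1, 0
    g, x, y = extended_euclidean_gcd(b, a % b)
    return g, y, x - (a // b) * y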
| 18 | 0 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("""T""")
def get_parent_position( position ):
    """Index of the parent of the heap node at ``position``."""
    return (position - 1) // 2


def get_child_left_position( position ):
    """Index of the left child of the heap node at ``position``."""
    return (2 * position) + 1


def get_child_right_position( position ):
    """Index of the right child of the heap node at ``position``."""
    return (2 * position) + 2


class MinPriorityQueue(Generic[T] ):
    """simple docstring"""

    def __init__( self ):
        """simple docstring"""
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__( self ):
        """simple docstring"""
        return self.elements

    def __repr__( self ):
        """simple docstring"""
        return str(self.heap )

    def is_empty( self ):
        """simple docstring"""
        return self.elements == 0

    def push( self , elem: T , weight: int ):
        """simple docstring"""
        self.heap.append((elem, weight) )
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem )

    def extract_min( self ):
        """simple docstring"""
        if self.elements > 1:
            self._swap_nodes(0 , self.elements - 1 )
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem )
        return elem

    def update_key( self , elem: T , weight: int ):
        """simple docstring"""
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position )
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem )
            else:
                self._bubble_down(elem )
        else:
            self._bubble_down(elem )

    def _bubble_up( self , elem: T ):
        """simple docstring"""
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos )
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position , curr_pos )
            return self._bubble_up(elem )
        return None

    def _bubble_down( self , elem: T ):
        """simple docstring"""
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos )
        child_right_position = get_child_right_position(curr_pos )
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position , curr_pos )
                return self._bubble_down(elem )
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position , curr_pos )
                return self._bubble_down(elem )
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position , curr_pos )
                return self._bubble_down(elem )
        return None

    def _swap_nodes( self , nodea_pos: int , nodeb_pos: int ):
        """simple docstring"""
        nodea_elem = self.heap[nodea_pos][0]
        nodeb_elem = self.heap[nodeb_pos][0]
        self.heap[nodea_pos], self.heap[nodeb_pos] = (
            self.heap[nodeb_pos],
            self.heap[nodea_pos],
        )
        self.position_map[nodea_elem] = nodeb_pos
        self.position_map[nodeb_elem] = nodea_pos


class GraphUndirectedWeighted(Generic[T] ):
    """simple docstring"""

    def __init__( self ):
        """simple docstring"""
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__( self ):
        """simple docstring"""
        return str(self.connections )

    def __len__( self ):
        """simple docstring"""
        return self.nodes

    def add_node( self , node: T ):
        """simple docstring"""
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge( self , nodea: T , nodeb: T , weight: int ):
        """simple docstring"""
        self.add_node(nodea )
        self.add_node(nodeb )
        self.connections[nodea][nodeb] = weight
        self.connections[nodeb][nodea] = weight


def prims_algo( graph: GraphUndirectedWeighted[T] , ):
    """simple docstring"""
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}
    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node , weight )
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour , dist[neighbour] )
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour , dist[neighbour] )
                parent[neighbour] = node
    return dist, parent
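

if __name__ == "__main__":
    # Added usage sketch: build a small weighted triangle and run Prim's; "a" is
    # extracted first, so both other nodes hang off it in the resulting MST.
    graph = GraphUndirectedWeighted()
    graph.add_edge("a" , "b" , 3 )
    graph.add_edge("b" , "c" , 10 )
    graph.add_edge("c" , "a" , 5 )
    dist, parent = prims_algo(graph )
    print(parent )  # {'a': None, 'b': 'a', 'c': 'a'}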
| 303 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vit_mae'''] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_vit_mae'''] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 18 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)
@dataclass
class IFPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 72 | import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type: str,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    """simple docstring"""
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path )
    gen_config = AutoConfig.from_pretrained(generator_name_or_path )
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path )
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path , generator_name_or_path , config=rag_config )
    rag_model.save_pretrained(dest_dir )
    # Sanity check.
    model_class.from_pretrained(dest_dir )
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path )
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" )
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path )
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
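
# Added example invocation (paths and script name are placeholders):
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-sequence-checkpoint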
| 18 | 0 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    """simple docstring"""
    return getitem, k


def _set(k, v):
    """simple docstring"""
    return setitem, k, v


def _del(k):
    """simple docstring"""
    return delitem, k


def _run_operation(obj, fun, *args):
    """Run ``fun`` on ``obj``, returning ``(result, None)`` or ``(None, exception)``."""
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
_overwrite_items = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
_delete_items = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
_access_absent_items = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
"""operations""" , (
pytest.param(_add_items , id="""add items""" ),
pytest.param(_overwrite_items , id="""overwrite items""" ),
pytest.param(_delete_items , id="""delete items""" ),
pytest.param(_access_absent_items , id="""access absent items""" ),
pytest.param(_add_with_resize_up , id="""add with resize up""" ),
pytest.param(_add_with_resize_down , id="""add with resize down""" ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    """simple docstring"""
    my = HashMap(initial_block_size=4 )
    py = {}
    for _, (fun, *args) in enumerate(operations ):
        my_res, my_exc = _run_operation(my , fun , *args )
        py_res, py_exc = _run_operation(py , fun , *args )
        assert my_res == py_res
        assert str(my_exc ) == str(py_exc )
        assert set(py ) == set(my )
        assert len(py ) == len(my )
        assert set(my.items() ) == set(py.items() )
def test_no_new_methods_implemented():
    """simple docstring"""
    def is_public(name ) -> bool:
        return not name.startswith("""_""" )

    dict_public_names = {name for name in dir({} ) if is_public(name )}
    hash_public_names = {name for name in dir(HashMap() ) if is_public(name )}
assert dict_public_names > hash_public_names
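

def test_hash_map_basic_round_trip():
    # Added smoke test (not in the original suite): a direct put/get/delete
    # round trip on the HashMap itself, without the dict-comparison harness.
    my = HashMap(initial_block_size=4 )
    my["key_a"] = "val_a"
    assert my["key_a"] == "val_a"
    del my["key_a"]
    assert len(my ) == 0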
| 116 | import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = {
"task_specific_params": {
"summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
"summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
"summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
}
}
SCREAMING_SNAKE_CASE_ : Any = {
"task_specific_params.summarization.length_penalty": 1.0,
"task_specific_params.summarization.max_length": 128,
"task_specific_params.summarization.min_length": 12,
"task_specific_params.summarization.num_beams": 4,
"task_specific_params.summarization_cnn.length_penalty": 2.0,
"task_specific_params.summarization_cnn.max_length": 142,
"task_specific_params.summarization_cnn.min_length": 56,
"task_specific_params.summarization_cnn.num_beams": 4,
"task_specific_params.summarization_xsum.length_penalty": 1.0,
"task_specific_params.summarization_xsum.max_length": 62,
"task_specific_params.summarization_xsum.min_length": 11,
"task_specific_params.summarization_xsum.num_beams": 6,
}
self.assertEqual(flatten_dict(_A ),_A )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3,4 )
self.assertTrue(np.allclose(transpose(_A ),x.transpose() ) )
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))
        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))
        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
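    # Usage sketch (not part of the test class): the helpers exercised above
    # dispatch on the input type, so one call covers numpy/torch/tf/jax tensors.
    # The import path below is an assumption based on recent transformers releases.
    #
    #     from transformers.utils import expand_dims, reshape, squeeze, transpose
    #     x = np.random.randn(1, 3, 4)
    #     transpose(x).shape            # (4, 3, 1)
    #     squeeze(x).shape              # (3, 4)
    #     expand_dims(x, axis=0).shape  # (1, 1, 3, 4)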
| 18 | 0 |
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Sequential search over array[left:right]; returns an index or -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted list; returns an index or -1."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over a sorted list; returns an index or -1."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by comma:\n''').strip()
    collection = [int(item.strip()) for item in user_input.split(''',''')]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input('''Enter the number to be found in the list:\n''').strip())
    resulta = ite_ternary_search(collection, target)
    resultb = rec_ternary_search(0, len(collection) - 1, collection, target)
    if resulta != -1:
        print(f'Iterative search: {target} found at positions: {resulta}')
        print(f'Recursive search: {target} found at positions: {resultb}')
else:
print('''Not found''')
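# Quick non-interactive sketch of the two entry points above (assuming the
# module is imported rather than run as a script):
#
#     data = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25]
#     ite_ternary_search(data, 13)                    # -> 6
#     rec_ternary_search(0, len(data) - 1, data, 13)  # -> 6
#     ite_ternary_search(data, 4)                     # -> -1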
| 104 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/longformer-base-4096''': 40_96,
'''allenai/longformer-large-4096''': 40_96,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 40_96,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 40_96,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 40_96,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Returns a mapping from utf-8 bytes to printable unicode strings."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
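# Small illustration of the two helpers above (values are what the code produces):
#
#     get_pairs("hello") == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}
#     byte_map = bytes_to_unicode()
#     byte_map[32] == "Ġ"   # the space byte maps to the well-known 'Ġ' stand-in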
class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None):
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep

    def get_special_tokens_mask(
        self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_b, already_has_special_tokens=True
            )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1, 1] + ([0] * len(token_ids_b)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
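    # Usage sketch (assumes the published checkpoint and the transformers package;
    # behaviour mirrors the byte-level BPE implemented above):
    #
    #     from transformers import LongformerTokenizer
    #     tok = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
    #     tok.tokenize("Hello world")   # byte-level BPE pieces, e.g. ["Hello", "Ġworld"]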
| 18 | 0 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("""At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training""")
# TF training parameters
USE_XLA = False
USE_AMP = False
def train_command_factory(args: Namespace):
    """Factory used by the CLI to instantiate the command from parsed args."""
    return TrainCommand(args)
class TrainCommand(BaseTransformersCLICommand):
"""simple docstring"""
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        '''Register this command's parser and arguments on the root CLI parser.'''
        train_parser = parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''' )
        train_parser.add_argument(
            '''--train_data''' , type=str , required=True , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , )
        train_parser.add_argument(
            '''--column_label''' , type=int , default=0 , help='''Column of the dataset csv file with example labels.''' )
        train_parser.add_argument(
            '''--column_text''' , type=int , default=1 , help='''Column of the dataset csv file with example texts.''' )
        train_parser.add_argument(
            '''--column_id''' , type=int , default=2 , help='''Column of the dataset csv file with example ids.''' )
        train_parser.add_argument(
            '''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''' )
        train_parser.add_argument('''--validation_data''' , type=str , default='''''' , help='''path to validation dataset.''' )
        train_parser.add_argument(
            '''--validation_split''' , type=float , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , )
        train_parser.add_argument('''--output''' , type=str , default='''./''' , help='''path to saved the trained model.''' )
        train_parser.add_argument(
            '''--task''' , type=str , default='''text_classification''' , help='''Task to train the model on.''' )
        train_parser.add_argument(
            '''--model''' , type=str , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''' )
        train_parser.add_argument('''--train_batch_size''' , type=int , default=32 , help='''Batch size for training.''' )
        train_parser.add_argument('''--valid_batch_size''' , type=int , default=64 , help='''Batch size for validation.''' )
        train_parser.add_argument('''--learning_rate''' , type=float , default=3e-5 , help='''Learning rate.''' )
        train_parser.add_argument('''--adam_epsilon''' , type=float , default=1e-08 , help='''Epsilon for Adam optimizer.''' )
        train_parser.set_defaults(func=train_command_factory)
    def __init__( self , args: Namespace ):
        '''simple docstring'''
        self.logger = logging.get_logger('''transformers-cli/training''' )
        self.framework = "tf" if is_tf_available() else "torch"
        os.makedirs(args.output , exist_ok=True )
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(F'''Loading {args.task} pipeline for {args.model}''' )
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model )
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(F'''Loading dataset from {args.train_data}''' )
        self.train_dataset = Processor.create_from_csv(
            args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(F'''Loading validation dataset from {args.validation_data}''' )
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run( self ):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()
    def run_torch( self ):
        raise NotImplementedError
    def run_tf( self ):
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
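# Example invocation, assuming the command is registered with the transformers
# CLI entry point (flags match the parser defined in register_subcommand above):
#
#     transformers-cli train \
#         --train_data train.csv --column_label 0 --column_text 1 --column_id 2 \
#         --task text_classification --model bert-base-uncased --output ./out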
| 186 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(self, parent):
        # The original signature exposed many defaulted arguments, but the body
        # always overrode them with these constants, so only `parent` matters.
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config, "use_cache"):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)
                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]
                self.assertEqual(len(outputs), num_out)
                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )
                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size], )
                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length], )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1E-4)
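# Minimal standalone sketch mirroring the integration test above (assumes TF
# and the public checkpoint are available):
#
#     import tensorflow as tf
#     from transformers import TFConvBertModel
#     model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
#     out = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
#     out.shape  # TensorShape([1, 6, 768])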
| 18 | 0 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 1_3_1_0_7_2,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
}
def alpha_sigma_to_t(alpha, sigma):
    return torch.atan2(sigma, alpha) / math.pi * 2
def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
class Object(object):
    """simple docstring"""

    pass
class DiffusionUncond(nn.Module):
    """simple docstring"""

    def __init__(self, global_args):
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"""wget {url} ./""")
    return f"""./{model_name}.ckpt"""
DOWN_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
UP_NUM_TO_LAYER = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
MID_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
DEPTH_0_TO_LAYER = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
RES_CONV_MAP = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
ATTN_MAP = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def convert_resconv_naming(name):
    if name.startswith("""skip"""):
        return name.replace("""skip""", RES_CONV_MAP["""skip"""])
    # name has to be of format main.{digit}
    if not name.startswith("""main."""):
        raise ValueError(f"""ResConvBlock error with {name}""")
    return name.replace(name[:6], RES_CONV_MAP[name[:6]])
def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"""Attn error with {name}""")
def rename(input_string, max_depth=13):
    string = input_string
    if string.split(""".""")[0] == "timestep_embed":
        return string.replace("""timestep_embed""", """time_proj""")
    depth = 0
    if string.startswith("""net.3."""):
        depth += 1
        string = string[6:]
    elif string.startswith("""net."""):
        string = string[4:]
    while string.startswith("""main.7."""):
        depth += 1
        string = string[7:]
    if string.startswith("""main."""):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"""down_blocks.{depth}"""
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"""up_blocks.{max_depth - depth - 1}"""
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"""up_blocks.{max_depth - 1}""" if int(layer_num) > 3 else "down_blocks.0"
    if not string_left.startswith("""."""):
        raise ValueError(f"""Naming error with {input_string} and string_left: {string_left}.""")
    string_left = string_left[1:]
    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left
    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("""kernel"""):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""")
    model_name = args.model_path.split("""/""")[-1].split(""".""")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"""Make sure to provide one of the official model names {MODELS_MAP.keys()}"""
        args.model_path = download(model_name)
    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]
    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0
    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()
    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["""state_dict"""])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)
    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())
    assert len(renamed_minus_diffusers) == 0, f"""Problem with {renamed_minus_diffusers}"""
    assert all(k.endswith("""kernel""") for k in list(diffusers_minus_renamed)), f"""Problem with {diffusers_minus_renamed}"""
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"""Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"""
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value
    diffusers_model.load_state_dict(diffusers_state_dict)
    steps = 100
    seed = 33
    scheduler = IPNDMScheduler(num_train_timesteps=steps)
    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)
    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)
    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=scheduler)
    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios
    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path)
    print("""Diff sum""", diff_sum)
    print("""Diff max""", diff_max)
    assert diff_max < 1E-3, f"""Diff max: {diff_max} is too much :-/"""
    print(f"""Conversion for {model_name} successful!""")
if __name__ == "__main__":
lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
lowerCamelCase : Union[str, Any] = parser.parse_args()
main(args)
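# Example invocation (model names come from MODELS_MAP above; the script
# filename and output path are illustrative assumptions):
#
#     python convert_dance_diffusion_to_diffusers.py \
#         --model_path gwf-440k --checkpoint_path ./dance-diffusion-gwf-440k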
| 124 |
def binary_recursive(decimal: int) -> str:
    """Return the unprefixed binary representation of a non-negative integer."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)
def main(number: str) -> str:
    """Validate the input string and return a signed '0b'-prefixed binary string."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f'{negative}0b{binary_recursive(int(number))}'
if __name__ == "__main__":
from doctest import testmod
testmod()
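# Worked examples for the conversion above:
#
#     binary_recursive(37)  # '100101'  (37 = 32 + 4 + 1)
#     main("0")             # '0b0'
#     main("-37")           # '-0b100101'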
| 18 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''junnyu/roformer_chinese_small''': 15_36,
'''junnyu/roformer_chinese_base''': 15_36,
'''junnyu/roformer_chinese_char_small''': 5_12,
'''junnyu/roformer_chinese_char_base''': 5_12,
'''junnyu/roformer_small_discriminator''': 1_28,
'''junnyu/roformer_small_generator''': 1_28,
}
PRETRAINED_INIT_CONFIGURATION = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> None:
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            pre_tok_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or pre_tok_state.get('''strip_accents''' , strip_accents ) != strip_accents
        ):
            pre_tok_class = getattr(normalizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state )
        self.do_lower_case = do_lower_case
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__( self , d ):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab) )
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b=None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]

    def save_vocabulary( self , save_directory , filename_prefix=None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def save_pretrained( self , save_directory , legacy_format=None , filename_prefix=None , push_to_hub=False , **kwargs ):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory , legacy_format , filename_prefix , push_to_hub , **kwargs )
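    # Usage sketch (checkpoint name taken from the pretrained maps above; the
    # custom Jieba pre-tokenizer additionally requires the rjieba package):
    #
    #     from transformers import RoFormerTokenizerFast
    #     tok = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
    #     tok("今天天气非常好。")["input_ids"]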
| 198 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
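# With the lazy module in place, consumers import as usual and each submodule is
# only loaded on first access. The checkpoint name below is an assumption:
#
#     from transformers import ChineseCLIPModel, ChineseCLIPProcessor
#     model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")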
| 18 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/realm-cc-news-pretrained-embedder''': 5_1_2,
'''google/realm-cc-news-pretrained-encoder''': 5_1_2,
'''google/realm-cc-news-pretrained-scorer''': 5_1_2,
'''google/realm-cc-news-pretrained-openqa''': 5_1_2,
'''google/realm-orqa-nq-openqa''': 5_1_2,
'''google/realm-orqa-nq-reader''': 5_1_2,
'''google/realm-orqa-wq-openqa''': 5_1_2,
'''google/realm-orqa-wq-reader''': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class UpperCamelCase__ ( A__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION
_SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE = RealmTokenizer
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : Tuple=None , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : Tuple="[UNK]" , SCREAMING_SNAKE_CASE_ : List[str]="[SEP]" , SCREAMING_SNAKE_CASE_ : List[str]="[PAD]" , SCREAMING_SNAKE_CASE_ : int="[CLS]" , SCREAMING_SNAKE_CASE_ : Dict="[MASK]" , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : int=None , **SCREAMING_SNAKE_CASE_ : List[str] , ):
super().__init__(
_A , tokenizer_file=_A , do_lower_case=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , tokenize_chinese_chars=_A , strip_accents=_A , **_A , )
lowerCAmelCase_ : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _A ) != do_lower_case
or normalizer_state.get('strip_accents' , _A ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _A ) != tokenize_chinese_chars
):
lowerCAmelCase_ : Optional[Any] = getattr(_A , normalizer_state.pop('type' ) )
lowerCAmelCase_ : int = do_lower_case
lowerCAmelCase_ : Union[str, Any] = strip_accents
lowerCAmelCase_ : int = tokenize_chinese_chars
lowerCAmelCase_ : str = normalizer_class(**_A )
lowerCAmelCase_ : Tuple = do_lower_case
def SCREAMING_SNAKE_CASE__ ( self : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : Tuple ):
lowerCAmelCase_ : str = PaddingStrategy.MAX_LENGTH
lowerCAmelCase_ : Dict = text
lowerCAmelCase_ : List[str] = kwargs.pop('text_pair' , _A )
lowerCAmelCase_ : Optional[int] = kwargs.pop('return_tensors' , _A )
lowerCAmelCase_ : int = {
"input_ids": [],
"attention_mask": [],
"token_type_ids": [],
}
for idx, candidate_text in enumerate(_A ):
if batch_text_pair is not None:
lowerCAmelCase_ : Optional[Any] = batch_text_pair[idx]
else:
lowerCAmelCase_ : Optional[int] = None
lowerCAmelCase_ : Optional[int] = super().__call__(_A , _A , return_tensors=_A , **_A )
lowerCAmelCase_ : Optional[int] = encoded_candidates.get('input_ids' )
lowerCAmelCase_ : Optional[Any] = encoded_candidates.get('attention_mask' )
lowerCAmelCase_ : Optional[int] = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(_A )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(_A )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(_A )
lowerCAmelCase_ : Any = {key: item for key, item in output_data.items() if len(_A ) != 0}
return BatchEncoding(_A , tensor_type=_A )
def SCREAMING_SNAKE_CASE__ ( self : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None ):
lowerCAmelCase_ : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ):
lowerCAmelCase_ : Dict = [self.sep_token_id]
lowerCAmelCase_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ):
lowerCAmelCase_ : Any = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
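# --- Editor's note: hedged usage sketch, not part of the dataset row above. ---
# The batched __call__ override above pads every candidate list to max_length and stacks
# the results per example; upstream this override is exposed as `batch_encode_candidates`.
# A minimal sketch (requires downloading the checkpoint; name taken from the URL map):
#
#   from transformers import RealmTokenizerFast
#   tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#   batch_text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
#   encoded = tokenizer.batch_encode_candidates(batch_text, max_length=10, return_tensors="pt")
#   # encoded["input_ids"].shape == (batch_size, num_candidates, max_length) == (2, 2, 10)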
| 224 | import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__lowerCamelCase : Any = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def _snake_case ( lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = list(s_dict.keys() )
for key in keys:
SCREAMING_SNAKE_CASE_ : int = R".*/layers_(\d+)"
SCREAMING_SNAKE_CASE_ : List[Any] = key
if re.match(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Any = re.sub(R"layers_(\d+)" , R"block/\1/layer" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = R"(encoder|decoder)\/"
if re.match(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : str = re.match(lowerCAmelCase , lowerCAmelCase ).groups()
if groups[0] == "encoder":
SCREAMING_SNAKE_CASE_ : Any = re.sub(R"/mlp/" , R"/1/mlp/" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = re.sub(R"/pre_mlp_layer_norm/" , R"/1/layer_norm/" , lowerCAmelCase )
elif groups[0] == "decoder":
SCREAMING_SNAKE_CASE_ : List[str] = re.sub(R"/mlp/" , R"/2/mlp/" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = re.sub(R"/pre_mlp_layer_norm/" , R"/2/layer_norm/" , lowerCAmelCase )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
SCREAMING_SNAKE_CASE_ : List[Any] = new_key.replace(lowerCAmelCase , lowerCAmelCase )
print(f'{key} -> {new_key}' )
SCREAMING_SNAKE_CASE_ : List[Any] = s_dict.pop(lowerCAmelCase )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
SCREAMING_SNAKE_CASE_ : str = s_dict[
"encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
SCREAMING_SNAKE_CASE_ : Optional[int] = s_dict[
"decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = s_dict[key].shape[0]
SCREAMING_SNAKE_CASE_ : List[Any] = s_dict[key]
for idx in range(lowerCAmelCase ):
                SCREAMING_SNAKE_CASE_ : Tuple = expert_weights[idx]
                print(f'{key} -> {key.replace("expert/" , f"experts/expert_{idx}/" )}' )
s_dict.pop(lowerCAmelCase )
return s_dict
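# Editor's note -- a worked example of the renaming above (illustrative, not exhaustive):
#   "encoder/layers_0/attention/key/kernel"
#     -> "encoder/block/0/layer/attention/key/kernel"       (layers_(\d+) -> block/\1/layer)
#     -> "encoder/block/0/layer/0/SelfAttention/k/kernel"   (MOE_LAYER_NAME_MAPPING: /attention/ -> /0/SelfAttention/, key -> k)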
__lowerCamelCase : List[Any] = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def _snake_case ( lowerCAmelCase : Dict , lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
import regex as re
with open(lowerCAmelCase , "r" ) as f:
SCREAMING_SNAKE_CASE_ : Optional[Any] = f.read()
SCREAMING_SNAKE_CASE_ : List[str] = re.findall(R"(.*) = ([0-9.]*)" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
SCREAMING_SNAKE_CASE_ : int = float(lowerCAmelCase ) if "." in value else int(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = re.findall(R"(.*activations) = \(\'(.*)\',\)" , lowerCAmelCase )[0]
SCREAMING_SNAKE_CASE_ : List[str] = str(activation[1] )
SCREAMING_SNAKE_CASE_ : str = num_experts
SCREAMING_SNAKE_CASE_ : Tuple = SwitchTransformersConfig(**lowerCAmelCase )
return config
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : str=None , lowerCAmelCase : Optional[Any]="./" , lowerCAmelCase : Dict=8 ):
"""simple docstring"""
print(f'Loading flax weights from : {flax_checkpoint_path}' )
    SCREAMING_SNAKE_CASE_ : int = checkpoints.load_t5x_checkpoint(lowerCAmelCase )
if gin_file is not None:
SCREAMING_SNAKE_CASE_ : int = convert_gin_to_config(lowerCAmelCase , lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_ : Dict = SwitchTransformersConfig.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : str = SwitchTransformersForConditionalGeneration(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = flax_params["target"]
SCREAMING_SNAKE_CASE_ : List[str] = flatten_dict(lowerCAmelCase , sep="/" )
SCREAMING_SNAKE_CASE_ : List[str] = rename_keys(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = unflatten_dict(lowerCAmelCase , sep="/" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(lowerCAmelCase , lowerCAmelCase )
print(f'Save PyTorch model to {pytorch_dump_path}' )
pt_model.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
__lowerCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
        '''Path to the T5X checkpoint of the pre-trained SwitchTransformers model whose weights should be'''
        ''' converted to PyTorch.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
__lowerCamelCase : Any = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
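# Editor's note -- hypothetical invocation of the converter above; the script name and
# all paths are placeholders, only the flags come from the argparse setup:
#   python convert_switch_transformers_checkpoint.py \
#       --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --gin_file /path/to/model.gin \
#       --pytorch_dump_folder_path ./switch-converted \
#       --num_experts 8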
| 18 | 0 |
'''simple docstring'''
import os
def lowerCamelCase ( ) ->Optional[Any]:
with open(os.path.dirname(__lowerCamelCase ) + """/p022_names.txt""" ) as file:
_SCREAMING_SNAKE_CASE = str(file.readlines()[0] )
_SCREAMING_SNAKE_CASE = names.replace("""\"""" , """""" ).split(""",""" )
names.sort()
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = 0
for i, name in enumerate(__lowerCamelCase ):
for letter in name:
name_score += ord(__lowerCamelCase ) - 64
total_score += (i + 1) * name_score
_SCREAMING_SNAKE_CASE = 0
return total_score
if __name__ == "__main__":
print(solution())
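# Editor's note -- worked example of the scoring rule above, from the problem statement:
# "COLIN" is worth 3 + 15 + 12 + 9 + 14 = 53 and sits at position 938 in the sorted list,
# so it contributes 938 * 53 = 49714 to the total.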
| 58 | from math import factorial, radians
def _snake_case ( lowerCAmelCase : float , lowerCAmelCase : int = 1_8 , lowerCAmelCase : int = 1_0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
SCREAMING_SNAKE_CASE_ : Tuple = radians(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = angle_in_radians
SCREAMING_SNAKE_CASE_ : List[str] = 3
SCREAMING_SNAKE_CASE_ : str = -1
for _ in range(lowerCAmelCase ):
result += (b * (angle_in_radians**a)) / factorial(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(lowerCAmelCase , lowerCAmelCase )
if __name__ == "__main__":
__import__('''doctest''').testmod()
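# Editor's note -- quick sanity check of the Maclaurin expansion above: with the default
# 18 terms the result matches math.sin to the 10 rounded decimal places, e.g.
#   _snake_case(30.0)  # 0.5
#   _snake_case(90.0)  # 1.0
# (the function name is the dataset's obfuscated one; upstream it is a maclaurin-sine helper).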
| 18 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
__snake_case = None
__snake_case = logging.get_logger(__name__)
__snake_case = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__snake_case = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
__snake_case = {
'''google/bigbird-roberta-base''': 40_96,
'''google/bigbird-roberta-large''': 40_96,
'''google/bigbird-base-trivia-itc''': 40_96,
}
__snake_case = '''▁'''
class __snake_case ( A__ ):
__lowerCamelCase : Tuple = VOCAB_FILES_NAMES
__lowerCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : Optional[int] = BigBirdTokenizer
__lowerCamelCase : List[Any] = ["""input_ids""", """attention_mask"""]
__lowerCamelCase : Union[str, Any] = []
def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<unk>" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="<pad>" , snake_case__="[SEP]" , snake_case__="[MASK]" , snake_case__="[CLS]" , **snake_case__ , ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Dict =AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else bos_token
UpperCAmelCase : int =AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else eos_token
UpperCAmelCase : Dict =AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else unk_token
UpperCAmelCase : str =AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else pad_token
UpperCAmelCase : int =AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else cls_token
UpperCAmelCase : List[str] =AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase : List[Any] =AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token
super().__init__(
_A , tokenizer_file=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , **_A , )
UpperCAmelCase : Optional[int] =vocab_file
UpperCAmelCase : Union[str, Any] =False if not self.vocab_file else True
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None ) -> int:
'''simple docstring'''
UpperCAmelCase : List[Any] =[self.sep_token_id]
UpperCAmelCase : Optional[int] =[self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None , snake_case__ = False ) -> str:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1]
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Optional[int] =[self.sep_token_id]
UpperCAmelCase : List[str] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None ) -> List[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase : List[str] =os.path.join(
_A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ):
copyfile(self.vocab_file , _A )
return (out_vocab_file,)
| 348 | from functools import lru_cache
@lru_cache
def _snake_case ( lowerCAmelCase : int ):
"""simple docstring"""
if num < 0:
raise ValueError("Number should not be negative." )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
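# Editor's note -- e.g. factorial(5) == 120 and factorial(0) == 1 (the recursive call
# above still targets the upstream name `factorial`). Because of @lru_cache, every
# intermediate value computed by the recursion is memoised, so a later call such as
# factorial(6) reuses the cached factorial(5) instead of recursing all the way down.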
| 18 | 0 |
__A ={str(digit): digit**5 for digit in range(1_0)}
def a ( _UpperCAmelCase : int ):
'''simple docstring'''
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(_UpperCAmelCase ) )
def a ( ):
'''simple docstring'''
return sum(
number
for number in range(10_00 , 1_00_00_00 )
if number == digits_fifth_powers_sum(_UpperCAmelCase ) )
if __name__ == "__main__":
print(solution())
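# Editor's note -- worked check of the criterion above:
#   4150 == 4**5 + 1**5 + 5**5 + 0**5 == 1024 + 1 + 3125 + 0
# so 4150 is one of the numbers summed by solution(); the range starts at 1000 because
# single-digit numbers are excluded by the problem statement and no smaller number qualifies.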
| 226 | from collections import defaultdict
def _snake_case ( lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = 1
SCREAMING_SNAKE_CASE_ : Tuple = True
for v in tree[start]:
if v not in visited:
ret += dfs(lowerCAmelCase )
if ret % 2 == 0:
cuts.append(lowerCAmelCase )
return ret
def _snake_case ( ):
"""simple docstring"""
dfs(1 )
if __name__ == "__main__":
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = 10, 9
__lowerCamelCase : Optional[int] = defaultdict(list)
__lowerCamelCase : dict[int, bool] = {}
__lowerCamelCase : list[int] = []
__lowerCamelCase : Optional[Any] = 0
__lowerCamelCase : Any = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
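# Editor's note -- for the sample graph above the script prints 2: cutting the edges
# (1, 3) and (1, 6) leaves components {3, 4}, {6, 8, 9, 10} and {1, 2, 5, 7}, all of even
# size. dfs() also appends the root (subtree size 10), hence the final `len(cuts) - 1`.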
| 18 | 0 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __UpperCamelCase ( A__ , A__ , A__ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase_ = StableUnCLIPPipeline
lowerCAmelCase_ = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
lowerCAmelCase_ = False
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = 32
__SCREAMING_SNAKE_CASE : str = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Dict = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=_A , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : List[Any] = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_A , num_layers=1 , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Optional[int] = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=_A , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : List[Any] = StableUnCLIPImageNormalizer(embedding_dim=_A )
__SCREAMING_SNAKE_CASE : str = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Any = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Dict = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_A , layers_per_block=1 , upcast_attention=_A , use_linear_projection=_A , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Any = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type='''v_prediction''' , set_alpha_to_one=_A , steps_offset=1 , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Optional[int] = AutoencoderKL()
__SCREAMING_SNAKE_CASE : List[str] = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def UpperCAmelCase__ ( self : Optional[int] , _A : int , _A : Optional[Any]=0 ):
"""simple docstring"""
if str(_A ).startswith('''mps''' ):
__SCREAMING_SNAKE_CASE : int = torch.manual_seed(_A )
else:
__SCREAMING_SNAKE_CASE : Any = torch.Generator(device=_A ).manual_seed(_A )
__SCREAMING_SNAKE_CASE : Any = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=_A )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=_A )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
__SCREAMING_SNAKE_CASE : Any = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__SCREAMING_SNAKE_CASE : int = torch.Generator(device='''cpu''' ).manual_seed(0 )
__SCREAMING_SNAKE_CASE : Optional[Any] = pipe('''anime turle''' , generator=_A , output_type='''np''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_A , _A )
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__SCREAMING_SNAKE_CASE : Union[str, Any] = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
__SCREAMING_SNAKE_CASE : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__SCREAMING_SNAKE_CASE : List[Any] = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
__SCREAMING_SNAKE_CASE : Dict = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 303 | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def _snake_case ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=lowerCAmelCase )
env_command_parser(subparsers=lowerCAmelCase )
launch_command_parser(subparsers=lowerCAmelCase )
tpu_command_parser(subparsers=lowerCAmelCase )
test_command_parser(subparsers=lowerCAmelCase )
# Let's go
SCREAMING_SNAKE_CASE_ : Dict = parser.parse_args()
if not hasattr(lowerCAmelCase , "func" ):
parser.print_help()
exit(1 )
# Run
args.func(lowerCAmelCase )
if __name__ == "__main__":
main()
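# Editor's note -- typical invocations routed through the subparsers registered above
# ("train.py" is a placeholder script name):
#   accelerate config            # interactive configuration wizard
#   accelerate env               # print environment information
#   accelerate test              # sanity-check the saved configuration
#   accelerate launch train.py   # launch a training script with that configuration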
| 18 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __snake_case ( unittest.TestCase):
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
_lowerCamelCase : Optional[int] = UNetaDModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
_lowerCamelCase : int = VQModel(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , )
return model
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
torch.manual_seed(0 )
_lowerCamelCase : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(_A )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : int = self.dummy_uncond_unet
_lowerCamelCase : Optional[Any] = DDIMScheduler()
_lowerCamelCase : Optional[int] = self.dummy_vq_model
_lowerCamelCase : Optional[int] = LDMPipeline(unet=_A , vqvae=_A , scheduler=_A )
ldm.to(_A )
ldm.set_progress_bar_config(disable=_A )
_lowerCamelCase : Dict = torch.manual_seed(0 )
_lowerCamelCase : str = ldm(generator=_A , num_inference_steps=2 , output_type='''numpy''' ).images
_lowerCamelCase : int = torch.manual_seed(0 )
_lowerCamelCase : List[Any] = ldm(generator=_A , num_inference_steps=2 , output_type='''numpy''' , return_dict=_A )[0]
_lowerCamelCase : Any = image[0, -3:, -3:, -1]
_lowerCamelCase : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_lowerCamelCase : Optional[Any] = np.array([0.85_12, 0.8_18, 0.64_11, 0.68_08, 0.44_65, 0.56_18, 0.46, 0.62_31, 0.51_72] )
_lowerCamelCase : int = 1E-2 if torch_device != "mps" else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = LDMPipeline.from_pretrained('''CompVis/ldm-celebahq-256''' )
ldm.to(_A )
ldm.set_progress_bar_config(disable=_A )
_lowerCamelCase : Union[str, Any] = torch.manual_seed(0 )
_lowerCamelCase : List[Any] = ldm(generator=_A , num_inference_steps=5 , output_type='''numpy''' ).images
_lowerCamelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_5_6, 2_5_6, 3)
_lowerCamelCase : Optional[int] = np.array([0.43_99, 0.4_49_75, 0.4_68_25, 0.4_74, 0.43_59, 0.45_81, 0.4_50_95, 0.43_41, 0.44_47] )
_lowerCamelCase : List[Any] = 1E-2 if torch_device != "mps" else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 72 | import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCamelCase : Dict = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__lowerCamelCase : int = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
__lowerCamelCase : Any = {
'''junnyu/roformer_chinese_small''': 15_36,
'''junnyu/roformer_chinese_base''': 15_36,
'''junnyu/roformer_chinese_char_small''': 5_12,
'''junnyu/roformer_chinese_char_base''': 5_12,
'''junnyu/roformer_small_discriminator''': 1_28,
'''junnyu/roformer_small_generator''': 1_28,
}
__lowerCamelCase : Union[str, Any] = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class a__ ( A__ ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = PRETRAINED_INIT_CONFIGURATION
A = RoFormerTokenizer
def __init__( self : List[str],_A : int=None,_A : int=None,_A : int=True,_A : List[Any]="[UNK]",_A : Tuple="[SEP]",_A : List[Any]="[PAD]",_A : Optional[int]="[CLS]",_A : Optional[Any]="[MASK]",_A : Optional[int]=True,_A : List[str]=None,**_A : List[Any],):
"""simple docstring"""
super().__init__(
_A,tokenizer_file=_A,do_lower_case=_A,unk_token=_A,sep_token=_A,pad_token=_A,cls_token=_A,mask_token=_A,tokenize_chinese_chars=_A,strip_accents=_A,**_A,)
SCREAMING_SNAKE_CASE_ : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get("lowercase",_A ) != do_lower_case
or pre_tok_state.get("strip_accents",_A ) != strip_accents
):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = getattr(_A,pre_tok_state.pop("type" ) )
SCREAMING_SNAKE_CASE_ : Any = do_lower_case
SCREAMING_SNAKE_CASE_ : List[str] = strip_accents
SCREAMING_SNAKE_CASE_ : str = pre_tok_class(**_A )
SCREAMING_SNAKE_CASE_ : List[str] = do_lower_case
def __getstate__( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.__dict__.copy()
SCREAMING_SNAKE_CASE_ : Optional[Any] = BertPreTokenizer()
return state
def __setstate__( self : List[Any],_A : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = d
SCREAMING_SNAKE_CASE_ : List[str] = self.__dict__["_tokenizer"].get_vocab()
SCREAMING_SNAKE_CASE_ : Any = PreTokenizer.custom(JiebaPreTokenizer(_A ) )
def __UpperCamelCase ( self : Union[str, Any],_A : List[Any],_A : str=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCamelCase ( self : str,_A : List[int],_A : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCamelCase ( self : int,_A : str,_A : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._tokenizer.model.save(_A,name=_A )
return tuple(_A )
def __UpperCamelCase ( self : int,_A : Optional[int],_A : List[Any]=None,_A : Tuple=None,_A : str=False,**_A : List[Any],):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BertPreTokenizer()
return super().save_pretrained(_A,_A,_A,_A,**_A )
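# Editor's note -- the __getstate__/__setstate__ pair above exists because a
# PreTokenizer.custom(...) wrapper around the Jieba pre-tokenizer cannot be serialised by
# the Rust backend: a stock BertPreTokenizer is swapped in before pickling and the Jieba
# one is rebuilt from the vocab on unpickling. A minimal round-trip sketch (requires the
# checkpoint download):
#   import pickle
#   tok = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#   tok2 = pickle.loads(pickle.dumps(tok))  # works only thanks to the state swap above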
| 18 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE_:Union[str, Any] = {'''configuration_beit''': ['''BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BeitConfig''', '''BeitOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Optional[Any] = ['''BeitFeatureExtractor''']
SCREAMING_SNAKE_CASE_:Union[str, Any] = ['''BeitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:int = [
'''BEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BeitForImageClassification''',
'''BeitForMaskedImageModeling''',
'''BeitForSemanticSegmentation''',
'''BeitModel''',
'''BeitPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Optional[Any] = [
'''FlaxBeitForImageClassification''',
'''FlaxBeitForMaskedImageModeling''',
'''FlaxBeitModel''',
'''FlaxBeitPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_:str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
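# Editor's note -- with the _LazyModule indirection above, `from transformers import
# BeitModel` resolves the heavy torch/flax imports only on first attribute access, while
# the TYPE_CHECKING branch keeps static type checkers pointed at the real symbols.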
| 116 | import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class a__ ( A__ ):
def __init__( self : Tuple,_A : Optional[int],_A : Any=13,_A : List[str]=7,_A : int=True,_A : Dict=True,_A : Dict=False,_A : List[Any]=True,_A : Any=99,_A : Optional[int]=32,_A : Any=5,_A : List[Any]=4,_A : Dict=64,_A : Optional[Any]="gelu",_A : Tuple=0.1,_A : Any=0.1,_A : List[Any]=512,_A : Dict=16,_A : Optional[Any]=2,_A : Union[str, Any]=0.02,_A : List[str]=3,_A : Optional[Any]=4,_A : Union[str, Any]=None,_A : Tuple=2,_A : List[str]=2,_A : str=2,_A : Dict=2,_A : Optional[Any]=4,_A : Union[str, Any]=1,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : Optional[int] = batch_size
SCREAMING_SNAKE_CASE_ : Dict = seq_length
SCREAMING_SNAKE_CASE_ : Dict = is_training
SCREAMING_SNAKE_CASE_ : Optional[int] = use_input_mask
SCREAMING_SNAKE_CASE_ : int = use_token_type_ids
SCREAMING_SNAKE_CASE_ : Optional[int] = use_labels
SCREAMING_SNAKE_CASE_ : Tuple = vocab_size
SCREAMING_SNAKE_CASE_ : Any = hidden_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE_ : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : List[str] = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : str = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = type_vocab_size
SCREAMING_SNAKE_CASE_ : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE_ : Tuple = num_labels
SCREAMING_SNAKE_CASE_ : List[Any] = num_choices
SCREAMING_SNAKE_CASE_ : Dict = scope
SCREAMING_SNAKE_CASE_ : int = q_groups
SCREAMING_SNAKE_CASE_ : Tuple = k_groups
SCREAMING_SNAKE_CASE_ : List[Any] = v_groups
SCREAMING_SNAKE_CASE_ : Tuple = post_attention_groups
SCREAMING_SNAKE_CASE_ : int = intermediate_groups
SCREAMING_SNAKE_CASE_ : List[Any] = output_groups
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
SCREAMING_SNAKE_CASE_ : List[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size],self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size],self.num_choices )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : str ):
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size,vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,attention_probs_dropout_prob=self.hidden_dropout_prob,attention_dropout=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,q_groups=self.q_groups,k_groups=self.k_groups,v_groups=self.v_groups,post_attention_groups=self.post_attention_groups,intermediate_groups=self.intermediate_groups,output_groups=self.output_groups,)
def __UpperCamelCase ( self : Tuple,_A : Union[str, Any],_A : Union[str, Any],_A : int,_A : Optional[int],_A : Union[str, Any],_A : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = SqueezeBertModel(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Any = model(_A,_A )
SCREAMING_SNAKE_CASE_ : List[str] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Dict,_A : Any,_A : Tuple,_A : str,_A : Any,_A : Union[str, Any],_A : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = SqueezeBertForMaskedLM(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : List[str] = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Optional[int],_A : Union[str, Any],_A : Union[str, Any],_A : Any,_A : Tuple,_A : int,_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = SqueezeBertForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(
_A,attention_mask=_A,start_positions=_A,end_positions=_A )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : List[Any],_A : List[str],_A : Tuple,_A : List[Any],_A : List[str],_A : List[str],_A : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.num_labels
SCREAMING_SNAKE_CASE_ : List[str] = SqueezeBertForSequenceClassification(_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : str,_A : Optional[int],_A : str,_A : List[Any],_A : List[str],_A : str,_A : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Optional[int] = SqueezeBertForTokenClassification(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[int] = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : List[Any],_A : Tuple,_A : str,_A : Optional[Any],_A : int,_A : str,_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.num_choices
SCREAMING_SNAKE_CASE_ : Union[str, Any] = SqueezeBertForMultipleChoice(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = input_ids.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : str = input_mask.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : Optional[int] = model(
_A,attention_mask=_A,labels=_A,)
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)) : Dict = config_and_inputs
SCREAMING_SNAKE_CASE_ : Dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class a__ ( A__ , A__ , unittest.TestCase ):
A = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
A = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A = False
A = True
A = False
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = SqueezeBertModelTester(self )
SCREAMING_SNAKE_CASE_ : List[str] = ConfigTester(self,config_class=_A,dim=37 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*_A )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*_A )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*_A )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_A )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*_A )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_A )
@slow
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : Tuple = SqueezeBertModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_sentencepiece
@require_tokenizers
@require_torch
class a__ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]] )
SCREAMING_SNAKE_CASE_ : List[Any] = model(_A )[0]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.Size((1, 3) )
self.assertEqual(output.shape,_A )
SCREAMING_SNAKE_CASE_ : int = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(_A,_A,atol=1E-4 ) )
| 18 | 0 |
'''simple docstring'''
def _A ( A__ = 1000 ):
"""simple docstring"""
__lowercase = 3
__lowercase = 0
while a < n:
if a % 3 == 0 or a % 5 == 0:
result += a
elif a % 15 == 0:
result -= a
a += 1
return result
if __name__ == "__main__":
print(f'{solution() = }')
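# Editor's note -- two remarks on the solution above. First, the `elif a % 15 == 0`
# branch is unreachable: any multiple of 15 already satisfied `a % 3 == 0`, so nothing is
# ever subtracted. Second, the loop can be cross-checked with the inclusion-exclusion
# closed form S(3) + S(5) - S(15), where S(d) = d * m * (m + 1) // 2 and m = (n - 1) // d;
# for n = 1000 both approaches give 233168.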
| 104 | import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[Any] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : List[str] = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = TextDatasetReader(lowerCAmelCase , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , split=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
if issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = text_path
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [text_path]
SCREAMING_SNAKE_CASE_ : int = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[int] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
def _snake_case ( lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : List[str]=("train",) ):
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase )
for split in splits:
SCREAMING_SNAKE_CASE_ : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[Any] = TextDatasetReader({"train": text_path} , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = tmp_path / "cache"
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
SCREAMING_SNAKE_CASE_ : Tuple = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : Dict = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader({"train": text_path} , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : Dict ):
"""simple docstring"""
if split:
SCREAMING_SNAKE_CASE_ : Optional[int] = {split: text_path}
else:
SCREAMING_SNAKE_CASE_ : List[Any] = "train"
SCREAMING_SNAKE_CASE_ : Tuple = {"train": text_path, "test": text_path}
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
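# Editor's note -- outside the test suite the same reader is reached through the public
# API, e.g. `load_dataset("text", data_files={"train": "train.txt"})`, which yields a
# DatasetDict with the single "text" column that the fixtures above check for.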
| 18 | 0 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowerCamelCase :
"""simple docstring"""
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=None , )->List[Any]:
'''simple docstring'''
A_ : int = parent
A_ : str = batch_size
A_ : Any = image_size
A_ : Optional[int] = patch_size
A_ : Optional[int] = num_channels
A_ : Any = is_training
A_ : Optional[Any] = use_labels
A_ : str = hidden_size
A_ : Optional[Any] = num_hidden_layers
A_ : Tuple = num_attention_heads
A_ : List[Any] = intermediate_size
A_ : Dict = hidden_act
A_ : Optional[int] = hidden_dropout_prob
A_ : str = attention_probs_dropout_prob
A_ : Optional[int] = type_sequence_label_size
A_ : str = initializer_range
A_ : Optional[int] = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
A_ : str = (image_size // patch_size) ** 2
A_ : int = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
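
# A minimal inference sketch for the model family exercised above, assuming the
# `transformers` TF API and access to the Hugging Face Hub; "cat.jpg" is a
# hypothetical local image, not a repository fixture.
def _vit_inference_sketch():
    image = Image.open("cat.jpg")
    processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
    model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
    inputs = processor(images=image, return_tensors="tf")
    logits = model(**inputs).logits
    return model.config.id2label[int(logits[0].numpy().argmax())]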
| 186 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNetaDConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # skip schedulers that are not compatible with this pipeline
                continue

            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-2
| 18 | 0 |
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 124 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"
    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("inputs", dynamic_axis),
("attention_mask", dynamic_axis),
] )
@property
    def atol_for_validation(self) -> float:
        return 1e-4
def __UpperCamelCase ( self : List[str],_A : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],_A : int = -1,_A : int = -1,_A : int = -1,_A : bool = False,_A : Optional[TensorType] = None,_A : int = 3,_A : int = 40,_A : int = 40,):
"""simple docstring"""
if isinstance(_A,_A ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Tuple = compute_effective_axis_dimension(
_A,fixed_dimension=OnnxConfig.default_fixed_batch,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Tuple = preprocessor.num_special_tokens_to_add(_A )
SCREAMING_SNAKE_CASE_ : Any = compute_effective_axis_dimension(
_A,fixed_dimension=OnnxConfig.default_fixed_sequence,num_token_to_add=_A )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE_ : Optional[Any] = [" ".join(["a"] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE_ : str = dict(preprocessor(_A,return_tensors=_A ) )
SCREAMING_SNAKE_CASE_ : List[str] = inputs.pop("input_ids" )
return inputs
elif isinstance(_A,_A ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Any = compute_effective_axis_dimension(_A,fixed_dimension=OnnxConfig.default_fixed_batch )
SCREAMING_SNAKE_CASE_ : Optional[int] = self._generate_dummy_images(_A,_A,_A,_A )
SCREAMING_SNAKE_CASE_ : Any = dict(preprocessor(images=_A,return_tensors=_A ) )
SCREAMING_SNAKE_CASE_ : Any = inputs.pop("pixel_values" )
return inputs
else:
raise ValueError(
"Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
| 18 | 0 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
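
# Programmatic use, for reference (a sketch, not part of the CLI itself): build
# the parser directly and inspect the returned info dict.
def _demo_env_report():
    parser = env_command_parser()
    args = parser.parse_args([])  # no --config_file -> fall back to the default path
    info = env_command(args)  # also prints a copy-pasteable report
    assert "`Accelerate` version" in info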
if __name__ == "__main__":
raise SystemExit(main())
| 198 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])

    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
__lowerCamelCase : List[Any] = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
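
    # A self-contained round-trip sketch of the class above: write YAML front matter
    # into a README, read it back, and update it (temporary path; illustrative only).
    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        demo_readme = Path(tmp_dir) / "README.md"
        demo_readme.write_text("---\npretty_name: Demo\n---\n# Demo dataset\n")
        demo_metadata = DatasetMetadata.from_readme(demo_readme)
        demo_metadata["license"] = "mit"
        demo_metadata.to_readme(demo_readme)
        assert "license: mit" in demo_readme.read_text()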
| 18 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_call_pil(self):
lowerCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
lowerCAmelCase_ : List[str] = prepare_video_inputs(self.image_processor_tester , equal_resolution=_A )
for video in video_inputs:
self.assertIsInstance(_A , _A )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
lowerCAmelCase_ : int = image_processing(video_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase_ : Tuple = image_processing(_A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
    def test_call_numpy(self):
lowerCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase_ : str = prepare_video_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for video in video_inputs:
self.assertIsInstance(_A , _A )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
lowerCAmelCase_ : Optional[int] = image_processing(video_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase_ : List[str] = image_processing(_A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
    def test_call_pytorch(self):
lowerCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase_ : Dict = prepare_video_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for video in video_inputs:
self.assertIsInstance(_A , _A )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
lowerCAmelCase_ : List[str] = image_processing(video_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase_ : Optional[int] = image_processing(_A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 224 |
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
    """Compute the resonant frequency of an LC circuit: f0 = 1 / (2 * pi * sqrt(L * C))."""
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
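
def _demo() -> None:
    """Worked example: L = 10 mH and C = 100 nF give f0 = 1 / (2*pi*sqrt(L*C)) ~ 5.03 kHz."""
    label, freq = resonant_frequency(inductance=10e-3, capacitance=100e-9)
    print(f"{label}: {freq:.1f} Hz")  # ~5032.9 Hz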
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
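
# Small usage sketch: sum a three-node tree via the iterator protocol.
def _example_tree_sum() -> int:
    tree = Node(10)
    tree.left = Node(5)
    tree.right = Node(-3)
    return next(iter(BinaryTreeNodeSum(tree)))  # 10 + 5 + (-3) == 12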
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58 |
def binary_insertion_sort(collection: list) -> list:
    """Sort a list in place, using binary search to find each insertion point."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1

        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
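
# Quick sanity checks of the implementation above (kept out of the interactive
# __main__ path below):
assert binary_insertion_sort([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]
assert binary_insertion_sort([]) == []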
if __name__ == "__main__":
__lowerCamelCase : Dict = input('''Enter numbers separated by a comma:\n''').strip()
__lowerCamelCase : List[str] = [int(item) for item in user_input.split(''',''')]
print(binary_insertion_sort(unsorted))
| 18 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : str =1
UpperCAmelCase : Optional[Any] =3
UpperCAmelCase : Union[str, Any] =(32, 32)
UpperCAmelCase : List[Any] =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_A )
return image
@property
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase : Dict =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
return model
@property
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase : List[Any] =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase : int =RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(_A )
@property
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
def extract(*snake_case__ , **snake_case__ ):
class __snake_case :
def __init__( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Any =torch.ones([0] )
def UpperCAmelCase__ ( self , snake_case__ ) -> List[str]:
'''simple docstring'''
self.pixel_values.to(_A )
return self
return Out()
return extract
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] ="cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase : Optional[int] =self.dummy_cond_unet
UpperCAmelCase : Optional[int] =PNDMScheduler(skip_prk_steps=_A )
UpperCAmelCase : int =self.dummy_vae
UpperCAmelCase : Dict =self.dummy_text_encoder
UpperCAmelCase : str =XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
UpperCAmelCase : Dict =77
UpperCAmelCase : Optional[Any] =self.dummy_image.to(_A )
UpperCAmelCase : Union[str, Any] =init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
UpperCAmelCase : Any =AltDiffusionImgaImgPipeline(
unet=_A , scheduler=_A , vae=_A , text_encoder=_A , tokenizer=_A , safety_checker=_A , feature_extractor=self.dummy_extractor , )
UpperCAmelCase : Optional[Any] =VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_A )
UpperCAmelCase : str =alt_pipe.to(_A )
alt_pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase : Union[str, Any] ="A painting of a squirrel eating a burger"
UpperCAmelCase : Optional[Any] =torch.Generator(device=_A ).manual_seed(0 )
UpperCAmelCase : Optional[Any] =alt_pipe(
[prompt] , generator=_A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=_A , )
UpperCAmelCase : List[str] =output.images
UpperCAmelCase : int =torch.Generator(device=_A ).manual_seed(0 )
UpperCAmelCase : str =alt_pipe(
[prompt] , generator=_A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=_A , return_dict=_A , )[0]
UpperCAmelCase : List[str] =image[0, -3:, -3:, -1]
UpperCAmelCase : Dict =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase : Dict =np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] =self.dummy_cond_unet
UpperCAmelCase : Optional[int] =PNDMScheduler(skip_prk_steps=_A )
UpperCAmelCase : List[Any] =self.dummy_vae
UpperCAmelCase : str =self.dummy_text_encoder
UpperCAmelCase : str =XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
UpperCAmelCase : Tuple =77
UpperCAmelCase : Tuple =self.dummy_image.to(_A )
# put models in fp16
UpperCAmelCase : Dict =unet.half()
UpperCAmelCase : str =vae.half()
UpperCAmelCase : int =bert.half()
# make sure here that pndm scheduler skips prk
UpperCAmelCase : int =AltDiffusionImgaImgPipeline(
unet=_A , scheduler=_A , vae=_A , text_encoder=_A , tokenizer=_A , safety_checker=_A , feature_extractor=self.dummy_extractor , )
UpperCAmelCase : List[str] =VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_A )
UpperCAmelCase : Any =alt_pipe.to(_A )
alt_pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase : Any ="A painting of a squirrel eating a burger"
UpperCAmelCase : Tuple =torch.manual_seed(0 )
UpperCAmelCase : int =alt_pipe(
[prompt] , generator=_A , num_inference_steps=2 , output_type='''np''' , image=_A , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
# resize to resolution that is divisible by 8 but not 16 or 32
UpperCAmelCase : int =init_image.resize((760, 504) )
UpperCAmelCase : Tuple ="BAAI/AltDiffusion"
UpperCAmelCase : Dict =AltDiffusionImgaImgPipeline.from_pretrained(
_A , safety_checker=_A , )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
UpperCAmelCase : int ="A fantasy landscape, trending on artstation"
UpperCAmelCase : Optional[Any] =torch.manual_seed(0 )
UpperCAmelCase : str =pipe(
prompt=_A , image=_A , strength=0.75 , guidance_scale=7.5 , generator=_A , output_type='''np''' , )
UpperCAmelCase : List[str] =output.images[0]
UpperCAmelCase : List[Any] =image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
UpperCAmelCase : Optional[int] =np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : List[str] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
UpperCAmelCase : Optional[int] =init_image.resize((768, 512) )
UpperCAmelCase : int =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )
UpperCAmelCase : List[Any] ="BAAI/AltDiffusion"
UpperCAmelCase : List[Any] =AltDiffusionImgaImgPipeline.from_pretrained(
_A , safety_checker=_A , )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
UpperCAmelCase : Tuple ="A fantasy landscape, trending on artstation"
UpperCAmelCase : Optional[int] =torch.manual_seed(0 )
UpperCAmelCase : Any =pipe(
prompt=_A , image=_A , strength=0.75 , guidance_scale=7.5 , generator=_A , output_type='''np''' , )
UpperCAmelCase : List[Any] =output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
| 348 |
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"
class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Update an element at index i in O(log N) time."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Query the combined value over [i, j] in O(log N) time."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
__lowerCamelCase : int = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 18 | 0 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)
def __call__( self : Tuple ):
'''simple docstring'''
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
    def flatten(self):
        """Flatten the Translation feature into a dictionary of string Values."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)
    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None
def __call__( self : Dict ):
'''simple docstring'''
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}
    def flatten(self):
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
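
if __name__ == "__main__":
    # Usage sketch (guarded to avoid circular imports inside the package): a fixed
    # two-language Translation column on a tiny in-memory dataset.
    from datasets import Dataset, Features, Value

    features = Features({"id": Value("string"), "translation": Translation(languages=["en", "fr"])})
    ds = Dataset.from_dict(
        {"id": ["1"], "translation": [{"en": "hello", "fr": "bonjour"}]},
        features=features,
    )
    print(ds[0]["translation"])  # {'en': 'hello', 'fr': 'bonjour'}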
| 226 |
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm for the greatest common divisor."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive form of the same algorithm: gcd(a, b) == gcd(b, a % b)."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
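
def _property_check(limit: int = 30) -> None:
    """Sanity sketch: both variants agree with math.gcd on a small grid."""
    from math import gcd

    for a in range(limit):
        for b in range(limit):
            assert euclidean_gcd(a, b) == euclidean_gcd_recursive(a, b) == gcd(a, b)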
def main() -> None:
    """Print gcd examples for both implementations."""
print(f'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' )
print(f'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' )
print(f'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' )
print(f'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' )
print(f'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' )
print(f'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' )
print(f'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' )
print(f'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' )
if __name__ == "__main__":
main()
| 18 | 0 |
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    """Squash a real value into (0, 1)."""
    return 1 / (1 + np.exp(-z))
def cost_function(h, y):
    """Binary cross-entropy loss."""
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))
def logistic_reg(alpha, x, y, max_iterations=70_000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
lowercase_ = datasets.load_iris()
lowercase_ = iris.data[:, :2]
lowercase_ = (iris.target != 0) * 1
lowercase_ = 0.1
lowercase_ = logistic_reg(alpha, x, y, max_iterations=70_000)
print("""theta: """, theta) # printing the theta i.e our weights vector
def predict_prob(x):
    return sigmoid_function(
        np.dot(x, theta)
    )  # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="""b""", label="""0""")
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="""r""", label="""1""")
(x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
(x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
(xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
grid = np.c_[xx1.ravel(), xx2.ravel()]
probs = predict_prob(grid).reshape(xx1.shape)
plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="""black""")
plt.legend()
plt.show()
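
# For reference, a sanity check of the scratch model against scikit-learn on the
# same two-feature iris task (assumes scikit-learn >= 1.2 is installed; exact
# coefficients differ unless intercept and regularisation are matched as below).
def _sklearn_check():
    from sklearn.linear_model import LogisticRegression

    iris_check = datasets.load_iris()
    x_check = iris_check.data[:, :2]
    y_check = (iris_check.target != 0) * 1
    clf = LogisticRegression(penalty=None, fit_intercept=False).fit(x_check, y_check)
    return clf.coef_.ravel()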
| 303 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
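
if __name__ == "__main__":
    # Quick demonstration of the lazy pattern above (assumes transformers is
    # installed): attribute access on the module object triggers the real import.
    import importlib

    vit_mae = importlib.import_module("transformers.models.vit_mae")
    print(vit_mae.ViTMAEConfig.model_type)  # -> "vit_mae"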
| 18 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __snake_case :
def __init__( self : str , __lowerCAmelCase : str , __lowerCAmelCase : str=2 , __lowerCAmelCase : List[str]=3_2 , __lowerCAmelCase : List[Any]=1_6 , __lowerCAmelCase : Any=3 , __lowerCAmelCase : str=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Union[str, Any]=3_2 , __lowerCAmelCase : Optional[Any]=4 , __lowerCAmelCase : Any=[0, 1, 2, 3] , __lowerCAmelCase : Any=4 , __lowerCAmelCase : Tuple=3_7 , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : int=0.02 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : str=[1, 3_8_4, 2_4, 2_4] , __lowerCAmelCase : str=True , __lowerCAmelCase : Tuple=None , ):
"""simple docstring"""
_lowerCamelCase : Dict = parent
_lowerCamelCase : Any = batch_size
_lowerCamelCase : Any = image_size
_lowerCamelCase : Optional[Any] = patch_size
_lowerCamelCase : List[str] = num_channels
_lowerCamelCase : List[str] = is_training
_lowerCamelCase : Any = use_labels
_lowerCamelCase : Union[str, Any] = hidden_size
_lowerCamelCase : Any = num_hidden_layers
_lowerCamelCase : Tuple = backbone_out_indices
_lowerCamelCase : Union[str, Any] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Union[str, Any] = hidden_act
_lowerCamelCase : Optional[Any] = hidden_dropout_prob
_lowerCamelCase : int = attention_probs_dropout_prob
_lowerCamelCase : Dict = initializer_range
_lowerCamelCase : int = num_labels
_lowerCamelCase : Optional[Any] = backbone_featmap_shape
_lowerCamelCase : str = scope
_lowerCamelCase : Any = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
_lowerCamelCase : Union[str, Any] = (image_size // patch_size) ** 2
_lowerCamelCase : Optional[Any] = num_patches + 1
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : Dict = None
if self.use_labels:
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_lowerCamelCase : str = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [9_6, 1_9_2, 3_8_4, 7_6_8],
"num_groups": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=_A , backbone_featmap_shape=self.backbone_featmap_shape , )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : Tuple = DPTModel(config=_A )
model.to(_A )
model.eval()
_lowerCamelCase : Any = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : int = self.num_labels
_lowerCamelCase : Optional[Any] = DPTForDepthEstimation(_A )
model.to(_A )
model.eval()
_lowerCamelCase : Tuple = model(_A )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Tuple = self.num_labels
_lowerCamelCase : Dict = DPTForSemanticSegmentation(_A )
model.to(_A )
model.eval()
_lowerCamelCase : Any = model(_A , labels=_A )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( A__ , A__ , unittest.TestCase):
snake_case__ : Union[str, Any] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
snake_case__ : Optional[int] = (
{
"depth-estimation": DPTForDepthEstimation,
"feature-extraction": DPTModel,
"image-segmentation": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
snake_case__ : Tuple = False
snake_case__ : Optional[int] = False
snake_case__ : Any = False
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = DPTModelTester(self )
_lowerCamelCase : int = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Tuple = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCamelCase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , nn.Linear ) )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[str] = model_class(_A )
_lowerCamelCase : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Tuple = [*signature.parameters.keys()]
_lowerCamelCase : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _A )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*_A )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_A )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[int] = True
if model_class in get_values(_A ):
continue
_lowerCamelCase : List[Any] = model_class(_A )
model.to(_A )
model.train()
_lowerCamelCase : List[Any] = self._prepare_for_class(_A , _A , return_labels=_A )
_lowerCamelCase : Dict = model(**_A ).loss
loss.backward()
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : List[str] = False
_lowerCamelCase : Optional[int] = True
if model_class in get_values(_A ) or not model_class.supports_gradient_checkpointing:
continue
_lowerCamelCase : Tuple = model_class(_A )
model.to(_A )
model.gradient_checkpointing_enable()
model.train()
_lowerCamelCase : Tuple = self._prepare_for_class(_A , _A , return_labels=_A )
_lowerCamelCase : Tuple = model(**_A ).loss
loss.backward()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Any = _config_zero_init(_A )
for model_class in self.all_model_classes:
_lowerCamelCase : List[str] = model_class(config=_A )
# Skip the check for the backbone
_lowerCamelCase : Any = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
_lowerCamelCase : List[str] = [f'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
pass
@slow
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
_lowerCamelCase : Any = DPTModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Dict = "add"
with self.assertRaises(_A ):
_lowerCamelCase : Any = DPTForDepthEstimation(_A )
def prepare_img( ):
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
@slow
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Any = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
_lowerCamelCase : Dict = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(_A )
_lowerCamelCase : Optional[int] = prepare_img()
_lowerCamelCase : Optional[Any] = image_processor(images=_A , return_tensors='''pt''' ).to(_A )
# forward pass
with torch.no_grad():
_lowerCamelCase : List[Any] = model(**_A )
_lowerCamelCase : Tuple = outputs.predicted_depth
# verify the predicted depth
_lowerCamelCase : int = torch.Size((1, 3_8_4, 3_8_4) )
self.assertEqual(predicted_depth.shape , _A )
_lowerCamelCase : Any = torch.tensor(
[[[5.64_37, 5.61_46, 5.65_11], [5.43_71, 5.56_49, 5.59_58], [5.52_15, 5.51_84, 5.52_93]]] ).to(_A )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_0_0 , _A , atol=1E-4 ) )
| 72 | import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    """simple docstring"""
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)
    # Sanity check.
    model_class.from_pretrained(dest_dir)
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
__lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
__lowerCamelCase : str = parser.parse_args()
__lowerCamelCase : int = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
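# Hedged example invocation (the model identifiers below are illustrative placeholders,
# not taken from this file):
#
#     python consolidate_rag_checkpoint.py \
#         --model_type rag_sequence \
#         --generator_name_or_path facebook/bart-large-cnn \
#         --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#         --dest ./rag-consolidated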
| 18 | 0 |
import argparse
import hashlib
import os
import urllib.request
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
SCREAMING_SNAKE_CASE_:int = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def remove_ignore_keys_(state_dict) -> None:
    """simple docstring"""
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
SCREAMING_SNAKE_CASE_:List[Any] = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
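# Illustrative example (assumption, not from the original file): applied by rename_keys
# below, the mapping turns an OpenAI-style key such as
#     "decoder.blocks.0.mlp.0.weight"
# into the Hugging Face key
#     "decoder.layers.0.fc1.weight"
# via plain substring replacement.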
def rename_keys(s_dict) -> dict:
    """simple docstring"""
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb) -> nn.Linear:
    """simple docstring"""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
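# Usage note (sketch): `make_linear_from_emb` ties the LM head to the token embedding.
# After `lin_layer.weight.data = emb.weight.data`, the layer computes
# hidden_states @ emb.weight.T, i.e. logits over the vocabulary, sharing storage with
# the embedding matrix.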
def _download(url: str, root: str = ".") -> bytes:
    """simple docstring"""
    # NOTE: the default for `root` is an assumption; the call site below passes only the URL.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")
    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError("Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.")
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path) -> None:
    """simple docstring"""
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )
    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )
    # NOTE: the assignment targets below were dropped in this dump; `model.proj_out` is
    # assumed here, following the usual Whisper conversion flow.
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Optional[int] = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Patht to the downloaded checkpoints""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
SCREAMING_SNAKE_CASE_:str = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
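# Hedged example invocation (the script name and paths are illustrative):
#
#     python convert_openai_to_hf.py --checkpoint_path tiny.en --pytorch_dump_folder_path ./whisper-tiny.en
#
# A bare size name such as "tiny.en" is resolved through the _MODELS table above and
# downloaded on demand; a local path ending in ".pt" is loaded directly.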
| 116 | import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = {
"task_specific_params": {
"summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
"summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
"summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
}
}
SCREAMING_SNAKE_CASE_ : Any = {
"task_specific_params.summarization.length_penalty": 1.0,
"task_specific_params.summarization.max_length": 128,
"task_specific_params.summarization.min_length": 12,
"task_specific_params.summarization.num_beams": 4,
"task_specific_params.summarization_cnn.length_penalty": 2.0,
"task_specific_params.summarization_cnn.max_length": 142,
"task_specific_params.summarization_cnn.min_length": 56,
"task_specific_params.summarization_cnn.num_beams": 4,
"task_specific_params.summarization_xsum.length_penalty": 1.0,
"task_specific_params.summarization_xsum.max_length": 62,
"task_specific_params.summarization_xsum.min_length": 11,
"task_specific_params.summarization_xsum.num_beams": 6,
}
self.assertEqual(flatten_dict(_A ),_A )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3,4 )
self.assertTrue(np.allclose(transpose(_A ),x.transpose() ) )
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4,5 )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),x.transpose((1, 2, 0) ) ) )
@require_torch
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Any = torch.tensor(_A )
self.assertTrue(np.allclose(transpose(_A ),transpose(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor(_A )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),transpose(_A,axes=(1, 2, 0) ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.constant(_A )
self.assertTrue(np.allclose(transpose(_A ),transpose(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(_A )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),transpose(_A,axes=(1, 2, 0) ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Tuple = jnp.array(_A )
self.assertTrue(np.allclose(transpose(_A ),np.asarray(transpose(_A ) ) ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : List[Any] = jnp.array(_A )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),np.asarray(transpose(_A,axes=(1, 2, 0) ) ) ) )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),np.reshape(_A,(4, 3) ) ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(3,4,5 )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),np.reshape(_A,(12, 5) ) ) )
@require_torch
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor(_A )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),reshape(_A,(4, 3) ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : int = torch.tensor(_A )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),reshape(_A,(12, 5) ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.constant(_A )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),reshape(_A,(4, 3) ).numpy() ) )
SCREAMING_SNAKE_CASE_ : int = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Any = tf.constant(_A )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),reshape(_A,(12, 5) ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : int = jnp.array(_A )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),np.asarray(reshape(_A,(4, 3) ) ) ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Tuple = jnp.array(_A )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),np.asarray(reshape(_A,(12, 5) ) ) ) )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = np.random.randn(1,3,4 )
self.assertTrue(np.allclose(squeeze(_A ),np.squeeze(_A ) ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(1,4,1,5 )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),np.squeeze(_A,axis=2 ) ) )
@require_torch
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(1,3,4 )
SCREAMING_SNAKE_CASE_ : Any = torch.tensor(_A )
self.assertTrue(np.allclose(squeeze(_A ),squeeze(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(1,4,1,5 )
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor(_A )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),squeeze(_A,axis=2 ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = np.random.randn(1,3,4 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(_A )
self.assertTrue(np.allclose(squeeze(_A ),squeeze(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(1,4,1,5 )
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.constant(_A )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),squeeze(_A,axis=2 ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(1,3,4 )
SCREAMING_SNAKE_CASE_ : List[str] = jnp.array(_A )
self.assertTrue(np.allclose(squeeze(_A ),np.asarray(squeeze(_A ) ) ) )
SCREAMING_SNAKE_CASE_ : str = np.random.randn(1,4,1,5 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.array(_A )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),np.asarray(squeeze(_A,axis=2 ) ) ) )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),np.expand_dims(_A,axis=1 ) ) )
@require_torch
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor(_A )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),expand_dims(_A,axis=1 ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.constant(_A )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),expand_dims(_A,axis=1 ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.array(_A )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),np.asarray(expand_dims(_A,axis=1 ) ) ) )
| 18 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def get_dpt_config( checkpoint_url ):
"""simple docstring"""
__lowercase = DPTConfig(embedding_type='''hybrid''' )
if "large" in checkpoint_url:
__lowercase = 1024
__lowercase = 4096
__lowercase = 24
__lowercase = 16
__lowercase = [5, 11, 17, 23]
__lowercase = [256, 512, 1024, 1024]
__lowercase = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
__lowercase = 768
__lowercase = [1, 1, 1, 0.5]
__lowercase = [256, 512, 768, 768]
__lowercase = 150
__lowercase = 16
__lowercase = (1, 384, 384)
__lowercase = False
__lowercase = "project"
if "ade" in checkpoint_url:
__lowercase = True
__lowercase = 768
__lowercase = [1, 1, 1, 0.5]
__lowercase = 150
__lowercase = 16
__lowercase = "huggingface/label-files"
__lowercase = "ade20k-id2label.json"
__lowercase = json.load(open(cached_download(hf_hub_url(A__ , A__ , repo_type='''dataset''' ) ) , '''r''' ) )
__lowercase = {int(A__ ): v for k, v in idalabel.items()}
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
__lowercase = [1, 150, 480, 480]
return config, expected_shape
def remove_ignore_keys_( state_dict ):
    """simple docstring"""
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key( name ):
"""simple docstring"""
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
__lowercase = name.replace('''pretrained.model''' , '''dpt.encoder''' )
if "pretrained.model" in name:
__lowercase = name.replace('''pretrained.model''' , '''dpt.embeddings''' )
if "patch_embed" in name:
__lowercase = name.replace('''patch_embed''' , '''''' )
if "pos_embed" in name:
__lowercase = name.replace('''pos_embed''' , '''position_embeddings''' )
if "attn.proj" in name:
__lowercase = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "proj" in name and "project" not in name:
__lowercase = name.replace('''proj''' , '''projection''' )
if "blocks" in name:
__lowercase = name.replace('''blocks''' , '''layer''' )
if "mlp.fc1" in name:
__lowercase = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
__lowercase = name.replace('''mlp.fc2''' , '''output.dense''' )
if "norm1" in name and "backbone" not in name:
__lowercase = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name and "backbone" not in name:
__lowercase = name.replace('''norm2''' , '''layernorm_after''' )
if "scratch.output_conv" in name:
__lowercase = name.replace('''scratch.output_conv''' , '''head''' )
if "scratch" in name:
__lowercase = name.replace('''scratch''' , '''neck''' )
if "layer1_rn" in name:
__lowercase = name.replace('''layer1_rn''' , '''convs.0''' )
if "layer2_rn" in name:
__lowercase = name.replace('''layer2_rn''' , '''convs.1''' )
if "layer3_rn" in name:
__lowercase = name.replace('''layer3_rn''' , '''convs.2''' )
if "layer4_rn" in name:
__lowercase = name.replace('''layer4_rn''' , '''convs.3''' )
if "refinenet" in name:
__lowercase = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
__lowercase = name.replace(F"refinenet{layer_idx}" , F"fusion_stage.layers.{abs(layer_idx-4 )}" )
if "out_conv" in name:
__lowercase = name.replace('''out_conv''' , '''projection''' )
if "resConfUnit1" in name:
__lowercase = name.replace('''resConfUnit1''' , '''residual_layer1''' )
if "resConfUnit2" in name:
__lowercase = name.replace('''resConfUnit2''' , '''residual_layer2''' )
if "conv1" in name:
__lowercase = name.replace('''conv1''' , '''convolution1''' )
if "conv2" in name:
__lowercase = name.replace('''conv2''' , '''convolution2''' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
__lowercase = name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''' )
if "pretrained.act_postprocess2.0.project.0" in name:
__lowercase = name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''' )
if "pretrained.act_postprocess3.0.project.0" in name:
__lowercase = name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''' )
if "pretrained.act_postprocess4.0.project.0" in name:
__lowercase = name.replace('''pretrained.act_postprocess4.0.project.0''' , '''neck.reassemble_stage.readout_projects.3.0''' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
__lowercase = name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''' )
if "pretrained.act_postprocess1.4" in name:
__lowercase = name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''' )
if "pretrained.act_postprocess2.3" in name:
__lowercase = name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''' )
if "pretrained.act_postprocess2.4" in name:
__lowercase = name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''' )
if "pretrained.act_postprocess3.3" in name:
__lowercase = name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''' )
if "pretrained.act_postprocess4.3" in name:
__lowercase = name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''' )
if "pretrained.act_postprocess4.4" in name:
__lowercase = name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''' )
if "pretrained" in name:
__lowercase = name.replace('''pretrained''' , '''dpt''' )
if "bn" in name:
__lowercase = name.replace('''bn''' , '''batch_norm''' )
if "head" in name:
__lowercase = name.replace('''head''' , '''head.head''' )
if "encoder.norm" in name:
__lowercase = name.replace('''encoder.norm''' , '''layernorm''' )
if "auxlayer" in name:
__lowercase = name.replace('''auxlayer''' , '''auxiliary_head.head''' )
if "backbone" in name:
__lowercase = name.replace('''backbone''' , '''backbone.bit.encoder''' )
if ".." in name:
__lowercase = name.replace('''..''' , '''.''' )
if "stem.conv" in name:
__lowercase = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
if "blocks" in name:
__lowercase = name.replace('''blocks''' , '''layers''' )
if "convolution" in name and "backbone" in name:
__lowercase = name.replace('''convolution''' , '''conv''' )
if "layer" in name and "backbone" in name:
__lowercase = name.replace('''layer''' , '''layers''' )
if "backbone.bit.encoder.bit" in name:
__lowercase = name.replace('''backbone.bit.encoder.bit''' , '''backbone.bit''' )
if "embedder.conv" in name:
__lowercase = name.replace('''embedder.conv''' , '''embedder.convolution''' )
if "backbone.bit.encoder.stem.norm" in name:
__lowercase = name.replace('''backbone.bit.encoder.stem.norm''' , '''backbone.bit.embedder.norm''' )
return name
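# Illustrative example (not from the original file): the rules above are intended to map,
# e.g.,
#     "pretrained.model.blocks.0.attn.proj.weight"
#       -> "dpt.encoder.layer.0.attention.output.dense.weight"
# applying the substring replacements in the order they are listed.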
def read_in_q_k_v( state_dict , config ):
    """simple docstring"""
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        # NOTE: the target key names below were dropped in this dump; they are reconstructed
        # here following the standard Hugging Face ViT attention layout (an assumption).
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
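# Shape note (illustrative): for hidden size H the fused qkv projection has weight of
# shape (3H, H) and bias of shape (3H,); the slices above take rows [0:H], [H:2H] and
# [2H:3H] as the query, key and value parameters respectively.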
def prepare_img( ):
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name , show_prediction ):
    """simple docstring"""
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")
    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode="bicubic", align_corners=False
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
parser.add_argument(
'''--show_prediction''',
action='''store_true''',
)
lowerCAmelCase__ = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 104 | import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
__lowerCamelCase : List[Any] = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
__lowerCamelCase : int = {
'''allenai/longformer-base-4096''': 40_96,
'''allenai/longformer-large-4096''': 40_96,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 40_96,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 40_96,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 40_96,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode( ):
    """simple docstring"""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
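# Illustrative example (not from the original file): every byte gets a printable
# character, e.g. the space byte 0x20 falls outside the printable ranges above and is
# remapped to chr(256 + 32) == "Ġ", the word-boundary marker seen in GPT-2 style vocabs.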
def get_pairs( word ):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
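# Illustrative example: get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}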
class a__ ( A__ ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = ['input_ids', 'attention_mask']
def __init__( self : Union[str, Any],_A : List[Any],_A : Tuple,_A : str="replace",_A : Optional[int]="<s>",_A : Dict="</s>",_A : Any="</s>",_A : Optional[Any]="<s>",_A : Union[str, Any]="<unk>",_A : int="<pad>",_A : Dict="<mask>",_A : int=False,**_A : Dict,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else bos_token
SCREAMING_SNAKE_CASE_ : Optional[int] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else eos_token
SCREAMING_SNAKE_CASE_ : str = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else sep_token
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else cls_token
SCREAMING_SNAKE_CASE_ : List[str] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else unk_token
SCREAMING_SNAKE_CASE_ : Optional[Any] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE_ : Dict = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else mask_token
super().__init__(
errors=_A,bos_token=_A,eos_token=_A,unk_token=_A,sep_token=_A,cls_token=_A,pad_token=_A,mask_token=_A,add_prefix_space=_A,**_A,)
with open(_A,encoding="utf-8" ) as vocab_handle:
SCREAMING_SNAKE_CASE_ : Tuple = json.load(_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE_ : Any = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE_ : Optional[Any] = bytes_to_unicode()
SCREAMING_SNAKE_CASE_ : str = {v: k for k, v in self.byte_encoder.items()}
with open(_A,encoding="utf-8" ) as merges_handle:
SCREAMING_SNAKE_CASE_ : int = merges_handle.read().split("\n" )[1:-1]
SCREAMING_SNAKE_CASE_ : List[str] = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE_ : Optional[int] = dict(zip(_A,range(len(_A ) ) ) )
SCREAMING_SNAKE_CASE_ : Any = {}
SCREAMING_SNAKE_CASE_ : List[str] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE_ : List[Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return len(self.encoder )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
return dict(self.encoder,**self.added_tokens_encoder )
    def bpe( self, token ):
        """simple docstring"""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
def __UpperCamelCase ( self : Dict,_A : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for token in re.findall(self.pat,_A ):
SCREAMING_SNAKE_CASE_ : Any = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_A ).split(" " ) )
return bpe_tokens
def __UpperCamelCase ( self : Optional[int],_A : str ):
"""simple docstring"""
return self.encoder.get(_A,self.encoder.get(self.unk_token ) )
def __UpperCamelCase ( self : Tuple,_A : str ):
"""simple docstring"""
return self.decoder.get(_A )
def __UpperCamelCase ( self : List[str],_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = "".join(_A )
SCREAMING_SNAKE_CASE_ : Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8",errors=self.errors )
return text
def __UpperCamelCase ( self : List[Any],_A : str,_A : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(_A ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
SCREAMING_SNAKE_CASE_ : Tuple = os.path.join(
_A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE_ : Any = os.path.join(
_A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(_A,"w",encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder,indent=2,sort_keys=_A,ensure_ascii=_A ) + "\n" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
with open(_A,"w",encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
" Please check that the tokenizer is not corrupted!" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = token_index
writer.write(" ".join(_A ) + "\n" )
index += 1
return vocab_file, merge_file
def __UpperCamelCase ( self : Optional[Any],_A : List[int],_A : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : str = [self.cls_token_id]
SCREAMING_SNAKE_CASE_ : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCamelCase ( self : str,_A : List[int],_A : Optional[List[int]] = None,_A : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A,token_ids_a=_A,already_has_special_tokens=_A )
if token_ids_a is None:
return [1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1]
def __UpperCamelCase ( self : Any,_A : List[int],_A : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self : Any,_A : Union[str, Any],_A : Any=False,**_A : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = kwargs.pop("add_prefix_space",self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_A ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE_ : str = " " + text
return (text, kwargs)
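    # Illustrative note (assumption): with add_prefix_space=True, a leading space is
    # prepended so that a word-initial token like "hello" is byte-encoded the same way
    # as " hello" occurring mid-sentence, matching RoBERTa-style BPE behaviour.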
| 18 | 0 |
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if discount_rate < 0:
raise ValueError('''Discount rate cannot be negative''' )
if not cash_flows:
raise ValueError('''Cash flows list cannot be empty''' )
A_ : str = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(SCREAMING_SNAKE_CASE ) )
return round(SCREAMING_SNAKE_CASE , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
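# Illustrative worked example (not part of the original file):
#     _SCREAMING_SNAKE_CASE(0.1, [-1000, 500, 500, 500])
#     = -1000/1.1**0 + 500/1.1**1 + 500/1.1**2 + 500/1.1**3
#     ≈ -1000 + 454.55 + 413.22 + 375.66 = 243.43
# Note that the first cash flow is discounted with i == 0, i.e. treated as occurring today.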
| 186 | from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class a__ :
def __init__( self : Optional[int],_A : Dict,_A : List[str]=13,_A : List[str]=7,_A : int=True,_A : str=True,_A : Union[str, Any]=True,_A : Tuple=True,_A : Dict=99,_A : Tuple=32,_A : Tuple=2,_A : Tuple=4,_A : Optional[Any]=37,_A : str="gelu",_A : Dict=0.1,_A : List[Any]=0.1,_A : List[str]=512,_A : str=16,_A : int=2,_A : Dict=0.02,_A : List[Any]=3,_A : Optional[Any]=4,_A : Optional[int]=None,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : Any = 13
SCREAMING_SNAKE_CASE_ : List[str] = 7
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Tuple = True
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : List[str] = 99
SCREAMING_SNAKE_CASE_ : Tuple = 384
SCREAMING_SNAKE_CASE_ : Optional[Any] = 2
SCREAMING_SNAKE_CASE_ : Any = 4
SCREAMING_SNAKE_CASE_ : str = 37
SCREAMING_SNAKE_CASE_ : Optional[Any] = "gelu"
SCREAMING_SNAKE_CASE_ : List[Any] = 0.1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0.1
SCREAMING_SNAKE_CASE_ : Dict = 512
SCREAMING_SNAKE_CASE_ : int = 16
SCREAMING_SNAKE_CASE_ : Optional[int] = 2
SCREAMING_SNAKE_CASE_ : Any = 0.02
SCREAMING_SNAKE_CASE_ : str = 3
SCREAMING_SNAKE_CASE_ : int = 4
SCREAMING_SNAKE_CASE_ : Dict = 128
SCREAMING_SNAKE_CASE_ : Any = 2
SCREAMING_SNAKE_CASE_ : Tuple = 9
SCREAMING_SNAKE_CASE_ : List[Any] = 1
SCREAMING_SNAKE_CASE_ : Any = None
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
SCREAMING_SNAKE_CASE_ : Any = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : List[str] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size],self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size],self.num_choices )
SCREAMING_SNAKE_CASE_ : Any = ConvBertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,initializer_range=self.initializer_range,return_dict=_A,)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : Optional[int],_A : List[Any],_A : int,_A : Tuple,_A : Optional[int],_A : Union[str, Any],_A : Union[str, Any],_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = TFConvBertModel(config=_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE_ : str = [input_ids, input_mask]
SCREAMING_SNAKE_CASE_ : List[str] = model(_A )
SCREAMING_SNAKE_CASE_ : Dict = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Dict,_A : Dict,_A : int,_A : Union[str, Any],_A : List[Any],_A : int,_A : str,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = TFConvBertForMaskedLM(config=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : List[Any] = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Any,_A : Optional[int],_A : List[Any],_A : Union[str, Any],_A : List[Any],_A : Union[str, Any],_A : Optional[int],_A : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Any = TFConvBertForSequenceClassification(config=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
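    # Added note: the tf.tile(tf.expand_dims(x, 1), (1, num_choices, 1)) pattern
    # above repeats each (batch, seq_len) tensor once per answer candidate, which
    # is the (batch, num_choices, seq_len) layout the multiple-choice head expects.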
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size], )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )
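    # Added note: ConvBERT's mixed attention replaces half of the usual heads with
    # span-based dynamic convolution (head_ratio defaults to 2), so the returned
    # attention tensors carry num_attention_heads / 2 heads -- hence the division
    # in the expected shapes above and below.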
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length], )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 18 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__( self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, patch_size=16, qkv_bias=True, frequency_stride=10, time_stride=10, max_length=1024, num_mel_bins=128, **kwargs, ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
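# A minimal sketch (added; not part of the original config file) of how the
# defaults above translate into a patch grid, assuming the usual AST recipe of
# 16x16 patches slid over a (max_length x num_mel_bins) spectrogram:
#   frequency_out = (num_mel_bins - patch_size) // frequency_stride + 1  # (128 - 16) // 10 + 1 = 12
#   time_out = (max_length - patch_size) // time_stride + 1             # (1024 - 16) // 10 + 1 = 101
#   num_patches = frequency_out * time_out                              # 12 * 101 = 1212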
| 124 | def binary_recursive(decimal: int) -> str:
    """simple docstring"""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """simple docstring"""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
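# Hedged examples (added for illustration, not in the original module):
#   binary_recursive(10)  -> "1010"      since 10 == 0b1010
#   main(" -25 ")         -> "-0b11001"  the sign is stripped before recursing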
if __name__ == "__main__":
from doctest import testmod
testmod()
| 18 | 0 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__( self, parent, batch_size=2, seq_length=56, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=7, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, attention_type="block_sparse", use_bias=True, rescale_embeddings=False, block_size=2, num_random_blocks=3, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, attention_type=self.attention_type, block_size=self.block_size, num_random_blocks=self.num_random_blocks, use_bias=self.use_bias, rescale_embeddings=self.rescale_embeddings, )

        return config, input_ids, token_type_ids, attention_mask
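    # Added note: with attention_type="block_sparse", BigBird expects the sequence
    # length to be a multiple of block_size; seq_length=56 with block_size=2 above
    # satisfies that, which is one reason these small defaults were chosen.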
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
| 19 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")
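    # Added note: with METRIC_INNER_PRODUCT, "nearest" means largest dot product.
    # Row i of the index stores i * ones(5), so the all-ones query scores 5 * i,
    # which is maximal for the last row -- hence "my_name-train_29" above.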
    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT, )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs", metric_type=faiss.METRIC_INNER_PRODUCT, )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs" )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
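# Added note: `mockfs` is assumed to be a pytest fixture (defined in the test
# suite's conftest, not shown here) that registers a mock fsspec filesystem for
# the "mock://" protocol, so save/load exercise the storage_options code path
# without touching the real filesystem.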
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
| 19 | 1 |
def binomial_coefficient(n: int, r: int) -> int:
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
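# Added note: this is the classic single-row Pascal's-triangle DP. `c` holds one
# row at a time and is updated right-to-left so each c[j - 1] read still belongs
# to the previous row; it runs in O(n * r) time and O(r) space, and for the call
# below binomial_coefficient(10, 5) == 252.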
print(binomial_coefficient(n=1_0, r=5))
| 19 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=torch_device, )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=torch_device, )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device )
        return inputs_dict
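    # Added note: OpenAIGPTDoubleHeadsModel scores num_choices candidate
    # continuations per example, so its labels/input_ids are shaped
    # (batch, num_choices, seq_len) and the multiple-choice head additionally
    # needs mc_token_ids / mc_labels -- which is why this override builds those
    # extra dummy tensors.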
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 19 | 1 |
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
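# Added note: this scraper depends on Indeed's HTML layout at the time the
# script was written (the data-tn-* attributes above); if the markup changes, or
# if requests without browser-like headers get blocked, the generator may simply
# yield nothing rather than fail loudly.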
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('''Bangalore'''), 1):
print(F"""Job {i:>2} is {job[0]} at {job[1]}""")
| 19 |
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number) )
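# Added note: 1000000 is a safe upper bound because a d-digit number has a
# fifth-power digit sum of at most d * 9**5 = d * 59049; for d = 7 that is only
# 413343 (a 6-digit value), so no number with 7 or more digits can equal its own
# digit-fifth-power sum.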
if __name__ == "__main__":
print(solution())
| 19 | 1 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__( self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2", "stage3"], out_indices=[1, 2, 3], ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)
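    # Added note: the literal shapes asserted above follow from the tiny test
    # config: 32x32 inputs with patch_size 2 give a 16x16 token grid, embed_dim
    # 16 is the stage-1 channel count, and channels double each stage -> [16, 32, 64].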
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        ) )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_retain_grad_hidden_states_attentions(self):
        pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1 )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_to_base(self):
        pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, Dict):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values(), dict_object.values() ):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5 ), msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                            f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                            f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                        ), )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| 19 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False) -> None:
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
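# Added note: timm stores query/key/value as one fused (3 * hidden_size, hidden_size)
# projection; the slices above peel off the first, middle and last hidden_size
# rows as Q, K and V respectively, in that order.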
def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img() -> Image.Image:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False) -> None:
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__A =parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 19 | 1 |
import math
from collections.abc import Iterator
from itertools import takewhile
def lowerCamelCase_ ( lowerCamelCase__ ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCamelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowerCamelCase_ ( ):
lowerCamelCase_ = 2
while True:
if is_prime(lowerCamelCase__ ):
yield num
num += 1
def lowerCamelCase_ ( lowerCamelCase__ = 2_0_0_0_0_0_0 ):
    return sum(takewhile(lambda x: x < lowerCamelCase__ , prime_generator() ) )
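# Self-contained sanity check (illustrative): the sum of the primes below ten
# is 2 + 3 + 5 + 7 = 17, which is what solution(1_0) is expected to return.
def _demo_sum_of_primes_below_ten():
    assert sum(p for p in range(2, 10) if all(p % d for d in range(2, p))) == 17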
if __name__ == "__main__":
print(F"""{solution() = }""")
| 19 |
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = [0 for i in range(r + 1 )]
# nc0 = 1
lowerCamelCase_ = 1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
lowerCamelCase_ = min(lowerCamelCase__ , lowerCamelCase__ )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=1_0, r=5))
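# Sanity check (illustrative; "binomial_coefficient" is the name used by the
# call above): the rolling-row update computes C(n, r), so it should agree
# with the closed form math.comb; C(10, 5) = 252.
import math

assert binomial_coefficient(1_0, 5) == math.comb(1_0, 5) == 252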
| 19 | 1 |
import argparse
import os
import re
import packaging.version
__A ='''examples/'''
__A ={
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
__A ={
'''init''': '''src/diffusers/__init__.py''',
'''setup''': '''setup.py''',
}
__A ='''README.md'''
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
with open(lowerCamelCase__ , "r" , encoding="utf-8" , newline="\n" ) as f:
lowerCamelCase_ = f.read()
lowerCamelCase_ , lowerCamelCase_ = REPLACE_PATTERNS[pattern]
lowerCamelCase_ = replace.replace("VERSION" , lowerCamelCase__ )
lowerCamelCase_ = re_pattern.sub(lowerCamelCase__ , lowerCamelCase__ )
with open(lowerCamelCase__ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(lowerCamelCase__ )
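# Illustrative check (not part of the release flow; the version strings are
# made up): the "init" pattern above rewrites a __version__ assignment in place.
def _demo_init_pattern():
    re_pattern, replace = REPLACE_PATTERNS["init"]
    code = '__version__ = "0.9.0.dev0"\n'
    assert re_pattern.sub(replace.replace("VERSION", "0.9.0"), code) == '__version__ = "0.9.0"\n'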
def lowerCamelCase_ ( lowerCamelCase__ ):
for folder, directories, fnames in os.walk(lowerCamelCase__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ , pattern="examples" )
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
if not patch:
update_version_in_examples(lowerCamelCase__ )
def lowerCamelCase_ ( ):
lowerCamelCase_ = "🤗 Transformers currently provides the following architectures"
lowerCamelCase_ = "1. Want to contribute a new model?"
with open(lowerCamelCase__ , "r" , encoding="utf-8" , newline="\n" ) as f:
lowerCamelCase_ = f.readlines()
# Find the start of the list.
lowerCamelCase_ = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
lowerCamelCase_ = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
lowerCamelCase_ = lines[index].replace(
"https://huggingface.co/docs/diffusers/main/model_doc" , "https://huggingface.co/docs/diffusers/model_doc" , )
index += 1
with open(lowerCamelCase__ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lowerCamelCase__ )
def lowerCamelCase_ ( ):
with open(REPLACE_FILES["init"] , "r" ) as f:
lowerCamelCase_ = f.read()
lowerCamelCase_ = REPLACE_PATTERNS["init"][0].search(lowerCamelCase__ ).groups()[0]
return packaging.version.parse(lowerCamelCase__ )
def lowerCamelCase_ ( lowerCamelCase__=False ):
lowerCamelCase_ = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
lowerCamelCase_ = default_version.base_version
elif patch:
lowerCamelCase_ = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
lowerCamelCase_ = F'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
lowerCamelCase_ = input(F'Which version are you releasing? [{default_version}]' )
if len(lowerCamelCase__ ) == 0:
lowerCamelCase_ = default_version
print(F'Updating version to {version}.' )
global_version_update(lowerCamelCase__ , patch=lowerCamelCase__ )
def lowerCamelCase_ ( ):
lowerCamelCase_ = get_version()
lowerCamelCase_ = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
lowerCamelCase_ = current_version.base_version
# Check with the user we got that right.
lowerCamelCase_ = input(F'Which version are we developing now? [{dev_version}]' )
if len(lowerCamelCase__ ) == 0:
lowerCamelCase_ = dev_version
print(F'Updating version to {version}.' )
global_version_update(lowerCamelCase__ )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
__A =argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
__A =parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 19 |
import math
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
if 0 not in (x, y):
        # We use log10(x^y) = y * log10(x), so comparing y * log10(x) compares the powers.
        return y * math.log10(x )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError("This should never happen" )
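# Worked example (illustrative; "res" is the name used by the calls below):
# res(2, 100) = 100 * log10(2) ~ 30.103 and res(10, 30) = 30, so 2**100 is
# larger than 10**30 without either power ever being materialised.
def _demo_compare_powers():
    assert res(2, 100) > res(10, 30)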
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
__A ='''Enter the base and the power separated by a comma: '''
__A, __A =map(int, input(prompt).split(''','''))
__A, __A =map(int, input(prompt).split(''','''))
# We find the log of each number, using the function res(), which takes two
# arguments.
__A =res(xa, ya)
__A =res(xa, ya)
# We check for the largest number
if resa > resa:
print('''Largest number is''', xa, '''^''', ya)
elif resa > resa:
print('''Largest number is''', xa, '''^''', ya)
else:
print('''Both are equal''')
| 19 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A =logging.get_logger(__name__)
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=False ):
lowerCamelCase_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'transformer.blocks.{i}.norm1.weight', F'vilt.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'transformer.blocks.{i}.norm1.bias', F'vilt.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'transformer.blocks.{i}.attn.proj.weight', F'vilt.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(F'transformer.blocks.{i}.attn.proj.bias', F'vilt.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'transformer.blocks.{i}.norm2.weight', F'vilt.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'transformer.blocks.{i}.norm2.bias', F'vilt.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(F'transformer.blocks.{i}.mlp.fc1.weight', F'vilt.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'transformer.blocks.{i}.mlp.fc1.bias', F'vilt.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'transformer.blocks.{i}.mlp.fc2.weight', F'vilt.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'transformer.blocks.{i}.mlp.fc2.bias', F'vilt.encoder.layer.{i}.output.dense.bias') )
# embeddings
rename_keys.extend(
[
# text embeddings
("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
(
"text_embeddings.position_embeddings.weight",
"vilt.embeddings.text_embeddings.position_embeddings.weight",
),
("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
(
"text_embeddings.token_type_embeddings.weight",
"vilt.embeddings.text_embeddings.token_type_embeddings.weight",
),
("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
# patch embeddings
("transformer.cls_token", "vilt.embeddings.cls_token"),
("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
# token type embeddings
("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
] )
# final layernorm + pooler
rename_keys.extend(
[
("transformer.norm.weight", "vilt.layernorm.weight"),
("transformer.norm.bias", "vilt.layernorm.bias"),
("pooler.dense.weight", "vilt.pooler.dense.weight"),
("pooler.dense.bias", "vilt.pooler.dense.bias"),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("vqa_classifier.0.weight", "classifier.0.weight"),
("vqa_classifier.0.bias", "classifier.0.bias"),
("vqa_classifier.1.weight", "classifier.1.weight"),
("vqa_classifier.1.bias", "classifier.1.bias"),
("vqa_classifier.3.weight", "classifier.3.weight"),
("vqa_classifier.3.bias", "classifier.3.bias"),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("nlvr2_classifier.0.weight", "classifier.0.weight"),
("nlvr2_classifier.0.bias", "classifier.0.bias"),
("nlvr2_classifier.1.weight", "classifier.1.weight"),
("nlvr2_classifier.1.bias", "classifier.1.bias"),
("nlvr2_classifier.3.weight", "classifier.3.weight"),
("nlvr2_classifier.3.bias", "classifier.3.bias"),
] )
else:
pass
return rename_keys
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
for i in range(config.num_hidden_layers ):
lowerCamelCase_ = "vilt."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase_ = state_dict.pop(F'transformer.blocks.{i}.attn.qkv.weight' )
lowerCamelCase_ = state_dict.pop(F'transformer.blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase_ = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase_ = in_proj_bias[: config.hidden_size]
lowerCamelCase_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase_ = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase_ = in_proj_bias[-config.hidden_size :]
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(lowerCamelCase__ , lowerCamelCase__ )
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = dct.pop(lowerCamelCase__ )
    dct[new] = val
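# Illustrative check ("rename_key" is the name used at the call sites below):
# the helper pops a key and re-inserts its value under the new name.
def _demo_rename_key():
    d = {"old": 1}
    rename_key(d, "old", "new")
    assert d == {"new": 1}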
@torch.no_grad()
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = ViltConfig(image_size=3_8_4 , patch_size=3_2 , tie_word_embeddings=lowerCamelCase__ )
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
if "vqa" in checkpoint_url:
lowerCamelCase_ = True
lowerCamelCase_ = 3_1_2_9
lowerCamelCase_ = "huggingface/label-files"
lowerCamelCase_ = "vqa2-id2label.json"
lowerCamelCase_ = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) )
        lowerCamelCase_ = {int(k ): v for k, v in id2label.items()}
        lowerCamelCase_ = id2label
        lowerCamelCase_ = {v: k for k, v in id2label.items()}
lowerCamelCase_ = ViltForQuestionAnswering(lowerCamelCase__ )
elif "nlvr" in checkpoint_url:
lowerCamelCase_ = True
lowerCamelCase_ = 2
lowerCamelCase_ = {0: "False", 1: "True"}
        lowerCamelCase_ = {v: k for k, v in config.id2label.items()}
lowerCamelCase_ = 3
lowerCamelCase_ = ViltForImagesAndTextClassification(lowerCamelCase__ )
elif "irtr" in checkpoint_url:
lowerCamelCase_ = True
lowerCamelCase_ = ViltForImageAndTextRetrieval(lowerCamelCase__ )
elif "mlm_itm" in checkpoint_url:
lowerCamelCase_ = True
lowerCamelCase_ = ViltForMaskedLM(lowerCamelCase__ )
else:
raise ValueError("Unknown model type" )
# load state_dict of original model, remove and rename some keys
lowerCamelCase_ = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="cpu" )["state_dict"]
lowerCamelCase_ = create_rename_keys(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
for src, dest in rename_keys:
rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
read_in_q_k_v(lowerCamelCase__ , lowerCamelCase__ )
if mlm_model or irtr_model:
lowerCamelCase_ = ["itm_score.fc.weight", "itm_score.fc.bias"]
for k in ignore_keys:
state_dict.pop(lowerCamelCase__ , lowerCamelCase__ )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
lowerCamelCase_ , lowerCamelCase_ = model.load_state_dict(lowerCamelCase__ , strict=lowerCamelCase__ )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(lowerCamelCase__ )
# Define processor
lowerCamelCase_ = ViltImageProcessor(size=3_8_4 )
lowerCamelCase_ = BertTokenizer.from_pretrained("bert-base-uncased" )
lowerCamelCase_ = ViltProcessor(lowerCamelCase__ , lowerCamelCase__ )
# Forward pass on example inputs (image + text)
if nlvr_model:
lowerCamelCase_ = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=lowerCamelCase__ ).raw )
lowerCamelCase_ = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=lowerCamelCase__ ).raw )
lowerCamelCase_ = (
"The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
" standing."
)
lowerCamelCase_ = processor(lowerCamelCase__ , lowerCamelCase__ , return_tensors="pt" )
lowerCamelCase_ = processor(lowerCamelCase__ , lowerCamelCase__ , return_tensors="pt" )
lowerCamelCase_ = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
lowerCamelCase_ = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg" , stream=lowerCamelCase__ ).raw )
if mlm_model:
lowerCamelCase_ = "a bunch of [MASK] laying on a [MASK]."
else:
lowerCamelCase_ = "How many cats are there?"
lowerCamelCase_ = processor(lowerCamelCase__ , lowerCamelCase__ , return_tensors="pt" )
lowerCamelCase_ = model(**lowerCamelCase__ )
# Verify outputs
if mlm_model:
lowerCamelCase_ = torch.Size([1, 1_1, 3_0_5_2_2] )
lowerCamelCase_ = torch.tensor([-12.50_61, -12.51_23, -12.51_74] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , lowerCamelCase__ , atol=1e-4 )
# verify masked token prediction equals "cats"
lowerCamelCase_ = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
lowerCamelCase_ = torch.Size([1, 3_1_2_9] )
lowerCamelCase_ = torch.tensor([-15.94_95, -18.14_72, -10.30_41] )
assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 )
assert outputs.logits.shape == expected_shape
# verify vqa prediction equals "2"
lowerCamelCase_ = outputs.logits.argmax(-1 ).item()
        assert model.config.id2label[predicted_idx] == "2"
elif nlvr_model:
lowerCamelCase_ = torch.Size([1, 2] )
lowerCamelCase_ = torch.tensor([-2.87_21, 2.12_91] )
assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 )
assert outputs.logits.shape == expected_shape
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(F'Saving model and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase__ )
processor.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__A =parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 19 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
__A =logging.get_logger(__name__)
__A =list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
__A =tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _SCREAMING_SNAKE_CASE :
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Model type selected in the list: ' + ', '.join(snake_case_ )} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowerCAmelCase__ = field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase__ = field(
default=1_28 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowerCAmelCase__ = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowerCAmelCase__ = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowerCAmelCase__ = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
    lowerCAmelCase__ = field(
        default=20 , metadata={'help': 'The total number of n-best predictions to generate in the nbest_predictions.json output file.'} )
lowerCAmelCase__ = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowerCAmelCase__ = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 'train'
lowerCAmelCase__ = 'dev'
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
def __init__( self , lowercase , lowercase , lowercase = None , lowercase = Split.train , lowercase = False , lowercase = None , lowercase = "pt" , ) -> List[str]:
lowerCamelCase_ = args
lowerCamelCase_ = is_language_sensitive
        lowerCamelCase_ = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(lowercase , lowercase ):
try:
lowerCamelCase_ = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
lowerCamelCase_ = mode
# Load data features from cache or dataset file
lowerCamelCase_ = "v2" if args.version_2_with_negative else "v1"
lowerCamelCase_ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase_ = cached_features_file + ".lock"
with FileLock(lowercase ):
if os.path.exists(lowercase ) and not args.overwrite_cache:
lowerCamelCase_ = time.time()
lowerCamelCase_ = torch.load(lowercase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
lowerCamelCase_ = self.old_features["features"]
lowerCamelCase_ = self.old_features.get("dataset" , lowercase )
lowerCamelCase_ = self.old_features.get("examples" , lowercase )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
" future run" )
else:
if mode == Split.dev:
lowerCamelCase_ = self.processor.get_dev_examples(args.data_dir )
else:
lowerCamelCase_ = self.processor.get_train_examples(args.data_dir )
lowerCamelCase_ , lowerCamelCase_ = squad_convert_examples_to_features(
examples=self.examples , tokenizer=lowercase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=lowercase , )
lowerCamelCase_ = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , lowercase , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ) -> Tuple:
return len(self.features )
def __getitem__( self , lowercase ) -> Dict[str, torch.Tensor]:
# Convert to Tensors and build dataset
lowerCamelCase_ = self.features[i]
lowerCamelCase_ = torch.tensor(feature.input_ids , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.attention_mask , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.token_type_ids , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.cls_index , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.p_mask , dtype=torch.float )
lowerCamelCase_ = torch.tensor(feature.is_impossible , dtype=torch.float )
lowerCamelCase_ = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
lowerCamelCase_ = torch.tensor(feature.start_position , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
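# Minimal sketch of the lock-then-cache pattern used in __init__ above
# (illustrative; the path and payload are made up): FileLock lets one process
# build the cache while other distributed workers block, then read it.
def _demo_cache_with_lock(path="demo_features.bin"):
    with FileLock(path + ".lock"):
        if os.path.exists(path):
            return torch.load(path)
        payload = {"features": []}
        torch.save(payload, path)
        return payload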
| 19 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__A ={
'''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure)
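# Hedged sketch of the idea behind _LazyModule above (illustrative only — the
# real implementation in transformers.utils also handles caching, extra
# objects and error reporting): a module subclass that imports the defining
# submodule lazily on first attribute access.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_submodule = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        submodule = importlib.import_module("." + self._attr_to_submodule[attr], self.__name__)
        return getattr(submodule, attr)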
| 19 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
@staticmethod
@abstractmethod
def SCREAMING_SNAKE_CASE_( lowercase ) -> int:
raise NotImplementedError()
@abstractmethod
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
raise NotImplementedError()
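# Hedged, self-contained sketch of the pattern above (names are illustrative,
# not the real CLI API): each command registers its own sub-parser and exposes
# a run() entry point. `parser` is expected to be the object returned by
# ArgumentParser.add_subparsers().
class _SketchCommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()


class _EchoCommand(_SketchCommand):
    def __init__(self, message):
        self._message = message

    @staticmethod
    def register_subcommand(parser):
        sub = parser.add_parser("echo")
        sub.add_argument("message")
        sub.set_defaults(func=lambda args: _EchoCommand(args.message))

    def run(self):
        print(self._message)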
| 19 | 1 |
import datasets
from .evaluate import evaluate
__A ='''\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
'''
__A ='''
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
'''
__A ='''
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the CUAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
\'aupr\': Area Under the Precision-Recall curve
\'prec_at_80_recall\': Precision at 80% recall
\'prec_at_90_recall\': Precision at 90% recall
Examples:
>>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> cuad_metric = datasets.load_metric("cuad")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE ( datasets.Metric ):
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {
"id": datasets.Value("string" ),
"prediction_text": datasets.features.Sequence(datasets.Value("string" ) ),
},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> Any:
lowerCamelCase_ = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
lowerCamelCase_ = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
lowerCamelCase_ = evaluate(dataset=lowercase , predictions=lowercase )
return score
| 19 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
__A =logging.get_logger(__name__) # pylint: disable=invalid-name
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> List[Any]:
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
lowerCamelCase_ = (
f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'
f' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , lowercase , standard_warn=lowercase )
lowerCamelCase_ = dict(scheduler.config )
lowerCamelCase_ = 1
lowerCamelCase_ = FrozenDict(lowercase )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
lowerCamelCase_ = (
f'The configuration file of this scheduler: {scheduler} has not set the configuration'
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , lowercase , standard_warn=lowercase )
lowerCamelCase_ = dict(scheduler.config )
lowerCamelCase_ = True
lowerCamelCase_ = FrozenDict(lowercase )
if safety_checker is None:
logger.warning(
f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=lowercase , segmentation_processor=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , unet=lowercase , scheduler=lowercase , safety_checker=lowercase , feature_extractor=lowercase , )
def SCREAMING_SNAKE_CASE_( self , lowercase = "auto" ) -> Tuple:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
self.enable_attention_slicing(lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> str:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowerCamelCase_ = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(lowercase , lowercase )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , lowercase , lowercase , lowercase , lowercase = 512 , lowercase = 512 , lowercase = 50 , lowercase = 7.5 , lowercase = None , lowercase = 1 , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = None , lowercase = 1 , **lowercase , ) -> int:
lowerCamelCase_ = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
lowerCamelCase_ = self.segmentation_model(**lowercase )
lowerCamelCase_ = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
lowerCamelCase_ = self.numpy_to_pil(lowercase )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
lowerCamelCase_ = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=lowercase , image=lowercase , mask_image=lowercase , height=lowercase , width=lowercase , num_inference_steps=lowercase , guidance_scale=lowercase , negative_prompt=lowercase , num_images_per_prompt=lowercase , eta=lowercase , generator=lowercase , latents=lowercase , output_type=lowercase , return_dict=lowercase , callback=lowercase , callback_steps=lowercase , )
| 19 | 1 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def lowerCamelCase_ ( lowerCamelCase__ ):
return EnvironmentCommand()
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
@staticmethod
def SCREAMING_SNAKE_CASE_( lowercase ) -> int:
lowerCamelCase_ = parser.add_parser("env" )
download_parser.set_defaults(func=lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = huggingface_hub.__version__
lowerCamelCase_ = "not installed"
lowerCamelCase_ = "NA"
if is_torch_available():
import torch
lowerCamelCase_ = torch.__version__
lowerCamelCase_ = torch.cuda.is_available()
lowerCamelCase_ = "not installed"
if is_transformers_available():
import transformers
lowerCamelCase_ = transformers.__version__
lowerCamelCase_ = "not installed"
if is_accelerate_available():
import accelerate
lowerCamelCase_ = accelerate.__version__
lowerCamelCase_ = "not installed"
if is_xformers_available():
import xformers
lowerCamelCase_ = xformers.__version__
lowerCamelCase_ = {
"`diffusers` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"PyTorch version (GPU?)": f'{pt_version} ({pt_cuda_available})',
"Huggingface_hub version": hub_version,
"Transformers version": transformers_version,
"Accelerate version": accelerate_version,
"xFormers version": xformers_version,
"Using GPU in script?": "<fill in>",
"Using distributed or parallel set-up in script?": "<fill in>",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
print(self.format_dict(lowercase ) )
return info
@staticmethod
def SCREAMING_SNAKE_CASE_( lowercase ) -> List[Any]:
return "\n".join([f'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
| 19 |
from collections import deque
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = len(lowerCamelCase__ )
lowerCamelCase_ = deque()
lowerCamelCase_ = [False for _ in range(lowerCamelCase__ )]
lowerCamelCase_ = [-1 for _ in range(lowerCamelCase__ )]
lowerCamelCase_ = index_of[:]
def strong_connect(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = index # the number when this node is seen
lowerCamelCase_ = index # lowest rank node reachable from here
index += 1
stack.append(lowerCamelCase__ )
lowerCamelCase_ = True
for w in g[v]:
if index_of[w] == -1:
lowerCamelCase_ = strong_connect(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
lowerCamelCase_ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
lowerCamelCase_ = []
lowerCamelCase_ = stack.pop()
lowerCamelCase_ = False
component.append(lowerCamelCase__ )
while w != v:
lowerCamelCase_ = stack.pop()
lowerCamelCase_ = False
component.append(lowerCamelCase__ )
components.append(lowerCamelCase__ )
return index
lowerCamelCase_ = []
for v in range(lowerCamelCase__ ):
if index_of[v] == -1:
strong_connect(lowerCamelCase__ , 0 , lowerCamelCase__ )
return components
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = [[] for _ in range(lowerCamelCase__ )]
for u, v in edges:
g[u].append(lowerCamelCase__ )
return g
if __name__ == "__main__":
# Test
__A =7
__A =[0, 0, 1, 2, 3, 3, 4, 4, 6]
__A =[1, 3, 2, 0, 1, 4, 5, 6, 5]
__A =[(u, v) for u, v in zip(source, target)]
__A =create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
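    # A second check (illustrative; plain names used for clarity): a directed
    # 3-cycle forms a single strongly connected component, and tarjan's pop
    # order emits it as [2, 1, 0].
    g_cycle = create_graph(3, [(0, 1), (1, 2), (2, 0)])
    assert [[2, 1, 0]] == tarjan(g_cycle)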
| 19 | 1 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = DDIMPipeline
lowerCAmelCase__ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
lowerCAmelCase__ = PipelineTesterMixin.required_optional_params - {
'num_images_per_prompt',
'latents',
'callback',
'callback_steps',
}
lowerCAmelCase__ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
lowerCAmelCase__ = False
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
torch.manual_seed(0 )
        lowerCamelCase_ = UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
lowerCamelCase_ = DDIMScheduler()
lowerCamelCase_ = {"unet": unet, "scheduler": scheduler}
return components
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase=0 ) -> Any:
if str(lowercase ).startswith("mps" ):
lowerCamelCase_ = torch.manual_seed(lowercase )
else:
lowerCamelCase_ = torch.Generator(device=lowercase ).manual_seed(lowercase )
lowerCamelCase_ = {
"batch_size": 1,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def SCREAMING_SNAKE_CASE_( self ) -> int:
lowerCamelCase_ = "cpu"
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = self.pipeline_class(**lowercase )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
lowerCamelCase_ = self.get_dummy_inputs(lowercase )
lowerCamelCase_ = pipe(**lowercase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
lowerCamelCase_ = np.array(
[1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
lowerCamelCase_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowercase , 1e-3 )
def SCREAMING_SNAKE_CASE_( self ) -> int:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
super().test_save_load_local(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = "google/ddpm-cifar10-32"
        lowerCamelCase_ = UNet2DModel.from_pretrained(lowercase )
lowerCamelCase_ = DDIMScheduler()
lowerCamelCase_ = DDIMPipeline(unet=lowercase , scheduler=lowercase )
ddim.to(lowercase )
ddim.set_progress_bar_config(disable=lowercase )
lowerCamelCase_ = torch.manual_seed(0 )
lowerCamelCase_ = ddim(generator=lowercase , eta=0.0 , output_type="numpy" ).images
lowerCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase_ = np.array([0.1_7_2_3, 0.1_6_1_7, 0.1_6_0_0, 0.1_6_2_6, 0.1_4_9_7, 0.1_5_1_3, 0.1_5_0_5, 0.1_4_4_2, 0.1_4_5_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = "google/ddpm-ema-bedroom-256"
        lowerCamelCase_ = UNet2DModel.from_pretrained(lowercase )
lowerCamelCase_ = DDIMScheduler.from_pretrained(lowercase )
lowerCamelCase_ = DDIMPipeline(unet=lowercase , scheduler=lowercase )
ddpm.to(lowercase )
ddpm.set_progress_bar_config(disable=lowercase )
lowerCamelCase_ = torch.manual_seed(0 )
lowerCamelCase_ = ddpm(generator=lowercase , output_type="numpy" ).images
lowerCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowerCamelCase_ = np.array([0.0_0_6_0, 0.0_2_0_1, 0.0_3_4_4, 0.0_0_2_4, 0.0_0_1_8, 0.0_0_0_2, 0.0_0_2_2, 0.0_0_0_0, 0.0_0_6_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 19 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A ={'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 19 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A ={
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 19 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A ={'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 19 | 1 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def lowerCamelCase_ ( lowerCamelCase__ ):
return (data["data"], data["target"])
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = XGBRegressor(verbosity=0 , random_state=4_2 )
xgb.fit(lowerCamelCase__ , lowerCamelCase__ )
# Predict target for test data
lowerCamelCase_ = xgb.predict(lowerCamelCase__ )
lowerCamelCase_ = predictions.reshape(len(lowerCamelCase__ ) , 1 )
return predictions
def lowerCamelCase_ ( ):
lowerCamelCase_ = fetch_california_housing()
lowerCamelCase_ , lowerCamelCase_ = data_handling(lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = train_test_split(
lowerCamelCase__ , lowerCamelCase__ , test_size=0.25 , random_state=1 )
lowerCamelCase_ = xgboost(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Error printing
print(F'Mean Absolute Error : {mean_absolute_error(lowerCamelCase__ , lowerCamelCase__ )}' )
    print(F'Mean Squared Error : {mean_squared_error(lowerCamelCase__ , lowerCamelCase__ )}' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 19 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 19 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by translation pipeline, to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts, src_lang="en_XX", tgt_texts=None, tgt_lang="ro_RO", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting. No prefix and suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
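# Usage sketch (illustrative, not part of the original file):
#   tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro")
#   batch = tok("UN Chief says there is no military solution in Syria",
#               text_target="Şeful ONU declară că nu există o soluţie militară în Siria",
#               return_tensors="pt")
# `text_target` routes the second text through the target-language special tokens
# configured by set_tgt_lang_special_tokens above.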
| 19 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 19 | 1 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    # loop over windows of length len(qs) in ks, beginning at position i
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
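# Illustrative usage (names assumed, not from the original file): producing a
# PartitionSpec tree for pjit from a Flax parameter dict.
#   from flax.core.frozen_dict import unfreeze
#   param_specs = set_partitions(unfreeze(model.params))
# `param_specs` mirrors the params PyTree with a PartitionSpec (or None) at each leaf,
# suitable for pjit's in_axis_resources / out_axis_resources.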
| 19 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range,
        )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)
        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFEsmModel,
'fill-mask': TFEsmForMaskedLM,
'text-classification': TFEsmForSequenceClassification,
'token-classification': TFEsmForTokenClassification,
'zero-shot': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))
@slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 19 | 1 |
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )
if __name__ == "__main__":
print(solution())
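# For fifth powers the sum above is 443839 (the answer to Project Euler problem 30).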
| 19 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
__A =logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
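# Illustrative values (not from the original file): on a model-parallel SageMaker job,
# SM_HP_MP_PARAMETERS typically carries a JSON blob such as
#   {"partitions": 2, "microbatches": 4, "optimize": "speed"}
# and only the "partitions" key is required by the check above.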
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )
    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )
@cached_property
    def _setup_devices(self) -> "torch.device":
logger.info("PyTorch: setting up devices" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"torch.distributed process group is initialized, but local_rank == -1. "
"In order to use Torch DDP, launch your script with `python -m torch.distributed.launch" )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device
    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
| 19 | 1 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]

    def __init__(self, vqvae: AutoencoderKL, unet: UNet2DConditionModel, mel: Mel, scheduler: Union[DDIMScheduler, DDPMScheduler]):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)
    def get_default_steps(self) -> int:
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
@torch.no_grad()
    def __call__(self, batch_size: int = 1, audio_file: str = None, raw_audio: np.ndarray = None, slice: int = 0, start_step: int = 0, steps: int = None, generator: torch.Generator = None, mask_start_secs: float = 0, mask_end_secs: float = 0, step_generator: torch.Generator = None, eta: float = 0, noise: torch.Tensor = None, encoding: torch.Tensor = None, return_dict=True) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None
        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]
            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator,
                )["prev_sample"]
            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]
        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
@torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """Reverse the denoising step process: recover the noisy image that generates an image."""
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample
    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical Linear intERPolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
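# Note on slerp above: it interpolates on the hypersphere,
#   x(alpha) = sin((1 - alpha) * theta) / sin(theta) * x0 + sin(alpha * theta) / sin(theta) * x1,
# where theta is the angle between the flattened tensors; this gives smooth walks
# through the model's noise space (e.g. for interpolating between two audio samples).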
| 19 |
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
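# Example: entering "4,2,9,1" prints [1.0, 2.0, 4.0, 9.0] (items are parsed as floats).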
| 19 | 1 |
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
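# Quick sanity check (values computed by hand):
#   maxpooling([[1, 2], [3, 4]], size=2, stride=2) -> [[4.]]
#   avgpooling([[1, 2], [3, 4]], size=2, stride=2) -> [[2.]]  (mean 2.5, truncated by int())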
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
    image = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 19 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval", **gen_kwargs) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 19 | 1 |
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        else:
            node_1 = self.head
            while node_1 is not None and node_1.data != node_data_1:
                node_1 = node_1.next
            node_2 = self.head
            while node_2 is not None and node_2.data != node_data_2:
                node_2 = node_2.next
            if node_1 is None or node_2 is None:
                return
            node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
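# Expected output:
#   1 2 3 4 5
#   After swapping
#   4 2 3 1 5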
| 19 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class ShapEPipelineOutput(BaseOutput):
    images: Union[List[List[PIL.Image.Image]], List[List[np.ndarray]]]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(self, prior: PriorTransformer, image_encoder: CLIPVisionModel, image_processor: CLIPImageProcessor, scheduler: HeunDiscreteScheduler, renderer: ShapERenderer):
        super().__init__()
        self.register_modules(
            prior=prior, image_encoder=image_encoder, image_processor=image_processor, scheduler=scheduler, renderer=renderer,
        )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image, num_images_per_prompt: int = 1, num_inference_steps: int = 25, generator=None, latents=None, guidance_scale: float = 4.0, frame_size: int = 64, output_type: str = "pil", return_dict: bool = True):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim
        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim), image_embeds.dtype, device, generator, latents, self.scheduler,
        )
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            noise_pred = self.prior(
                scaled_model_input, timestep=t, proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred, timestep=t, sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :], device, size=frame_size, ray_batch_size=4096, n_coarse_samples=64, n_fine_samples=128,
            )
            images.append(image)

        images = torch.stack(images)
        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()
        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
| 19 | 1 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/umt5-small''': '''https://huggingface.co/google/umt5-small/resolve/main/config.json''',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__(self, vocab_size=250112, d_model=512, d_kv=64, d_ff=1024, num_layers=8, num_decoder_layers=None, num_heads=6, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, feed_forward_proj="gated-gelu", is_encoder_decoder=True, use_cache=True, tokenizer_class="T5Tokenizer", tie_word_embeddings=True, pad_token_id=0, eos_token_id=1, decoder_start_token_id=0, **kwargs):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
return 13
@property
    def atol_for_validation(self) -> float:
return 5e-4
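# Usage sketch (illustrative values, not from the original file):
#   config = UMT5Config(vocab_size=1000, d_model=64, num_layers=2, num_heads=4)
#   assert config.hidden_size == config.d_model
#   assert config.num_hidden_layers == config.num_layers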
| 19 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])
    result = mst(adjacency)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
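# Run with pytest (the filename below is an assumption, adjust to the actual module):
#   pytest -q test_minimum_spanning_tree_prims.py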
| 19 | 1 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"
def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> List[Any]:
lowerCamelCase_ = AutoTokenizer.from_pretrained(lowercase )
lowerCamelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
lowerCamelCase_ = max(len(tokenizer.encode(lowercase ) ) for a in ARTICLES )
lowerCamelCase_ = max(len(tokenizer.encode(lowercase ) ) for a in SUMMARIES )
lowerCamelCase_ = 4
lowerCamelCase_ = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
lowerCamelCase_ , lowerCamelCase_ = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error.
lowerCamelCase_ = SeqaSeqDataset(
lowercase , data_dir=lowercase , type_path="train" , max_source_length=lowercase , max_target_length=lowercase , src_lang=lowercase , tgt_lang=lowercase , )
lowerCamelCase_ = DataLoader(lowercase , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(lowercase , lowercase )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
lowerCamelCase_ = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> str:
lowerCamelCase_ = AutoTokenizer.from_pretrained(lowercase )
lowerCamelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
lowerCamelCase_ = max(len(tokenizer.encode(lowercase ) ) for a in ARTICLES )
lowerCamelCase_ = max(len(tokenizer.encode(lowercase ) ) for a in SUMMARIES )
lowerCamelCase_ = 4
lowerCamelCase_ = LegacySeqaSeqDataset(
lowercase , data_dir=lowercase , type_path="train" , max_source_length=20 , max_target_length=lowercase , )
lowerCamelCase_ = DataLoader(lowercase , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" )
lowerCamelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
lowerCamelCase_ = tmp_dir.joinpath("train.source" ).open().readlines()
lowerCamelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(lowercase , lowercase , 128 , lowercase )
lowerCamelCase_ = {x.name for x in tmp_dir.iterdir()}
lowerCamelCase_ = {x.name for x in save_dir.iterdir()}
lowerCamelCase_ = save_dir.joinpath("train.source" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(lowercase ) < len(lowercase )
assert len(lowercase ) == 1
assert len(packed_examples[0] ) == sum(len(lowercase ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
if not FAIRSEQ_AVAILABLE:
return
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self._get_dataset(max_len=64 )
lowerCamelCase_ = 64
lowerCamelCase_ = ds.make_dynamic_sampler(lowercase , required_batch_size_multiple=lowercase )
lowerCamelCase_ = [len(lowercase ) for x in batch_sampler]
assert len(set(lowercase ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(lowercase ) == len(lowercase ) # no dropped or added examples
lowerCamelCase_ = DataLoader(lowercase , batch_sampler=lowercase , collate_fn=ds.collate_fn , num_workers=2 )
lowerCamelCase_ = []
lowerCamelCase_ = []
for batch in data_loader:
lowerCamelCase_ = batch["input_ids"].shape
lowerCamelCase_ = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            lowerCamelCase_ = np.prod(batch["input_ids"].shape )  # np.product is a deprecated alias of np.prod
num_src_per_batch.append(lowercase )
if num_src_tokens > (max_tokens * 1.1):
failures.append(lowercase )
assert num_src_per_batch[0] == max(lowercase )
if failures:
raise AssertionError(f'too many tokens in {len(lowercase )} batches' )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self._get_dataset(max_len=512 )
lowerCamelCase_ = 2
lowerCamelCase_ = ds.make_sortish_sampler(lowercase , shuffle=lowercase )
lowerCamelCase_ = DataLoader(lowercase , batch_size=lowercase , collate_fn=ds.collate_fn , num_workers=2 )
lowerCamelCase_ = DataLoader(lowercase , batch_size=lowercase , collate_fn=ds.collate_fn , num_workers=2 , sampler=lowercase )
lowerCamelCase_ = tokenizer.pad_token_id
def count_pad_tokens(lowercase , lowercase="input_ids" ):
return [batch[k].eq(lowercase ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(lowercase , k="labels" ) ) < sum(count_pad_tokens(lowercase , k="labels" ) )
assert sum(count_pad_tokens(lowercase ) ) < sum(count_pad_tokens(lowercase ) )
assert len(lowercase ) == len(lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase=1000 , lowercase=128 ) -> str:
if os.getenv("USE_REAL_DATA" , lowercase ):
lowerCamelCase_ = "examples/seq2seq/wmt_en_ro"
lowerCamelCase_ = max_len * 2 * 64
if not Path(lowercase ).joinpath("train.len" ).exists():
save_len_file(lowercase , lowercase )
else:
lowerCamelCase_ = "examples/seq2seq/test_data/wmt_en_ro"
lowerCamelCase_ = max_len * 4
save_len_file(lowercase , lowercase )
lowerCamelCase_ = AutoTokenizer.from_pretrained(lowercase )
lowerCamelCase_ = SeqaSeqDataset(
lowercase , data_dir=lowercase , type_path="train" , max_source_length=lowercase , max_target_length=lowercase , n_obs=lowercase , )
return ds, max_tokens, tokenizer
def SCREAMING_SNAKE_CASE_( self ) -> int:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self._get_dataset()
lowerCamelCase_ = set(DistributedSortishSampler(lowercase , 256 , num_replicas=2 , rank=0 , add_extra_examples=lowercase ) )
lowerCamelCase_ = set(DistributedSortishSampler(lowercase , 256 , num_replicas=2 , rank=1 , add_extra_examples=lowercase ) )
assert idsa.intersection(lowercase ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Union[str, Any]:
lowerCamelCase_ = AutoTokenizer.from_pretrained(lowercase , use_fast=lowercase )
if tok_name == MBART_TINY:
lowerCamelCase_ = SeqaSeqDataset(
lowercase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , )
lowerCamelCase_ = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
lowerCamelCase_ = SeqaSeqDataset(
lowercase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , )
lowerCamelCase_ = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(lowercase ) == 1 if tok_name == BART_TINY else len(lowercase ) == 0
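# Editor's note: a minimal sketch of the token-budget batching idea exercised by
# the `make_dynamic_sampler` test above — sort examples by length, then greedily
# cut batches so that batch_size * longest_example stays under `max_tokens`. A
# hypothetical helper, not the fairseq/SeqaSeqDataset implementation.
def dynamic_batches_sketch(lengths, max_tokens):
    order = sorted(range(len(lengths)), key=lambda i: lengths[i], reverse=True)
    batches, batch, batch_max = [], [], 0
    for idx in order:
        new_max = max(batch_max, lengths[idx])
        if batch and new_max * (len(batch) + 1) > max_tokens:
            batches.append(batch)             # flush: adding one more would bust the budget
            batch, batch_max = [], 0
            new_max = lengths[idx]
        batch.append(idx)
        batch_max = new_max
    if batch:
        batches.append(batch)
    return batches  # variable batch sizes, roughly constant token count per batch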
| 19 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__A =1_6
__A =3_2
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ = 1_6 ):
lowerCamelCase_ = AutoTokenizer.from_pretrained("bert-base-cased" )
lowerCamelCase_ = load_dataset("glue" , "mrpc" )
def tokenize_function(lowerCamelCase__ ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase_ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCamelCase_ = datasets.map(
lowerCamelCase__ , batched=lowerCamelCase__ , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase_ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(lowerCamelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCamelCase_ = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCamelCase_ = 1_6
elif accelerator.mixed_precision != "no":
lowerCamelCase_ = 8
else:
lowerCamelCase_ = None
return tokenizer.pad(
lowerCamelCase__ , padding="longest" , max_length=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_tensors="pt" , )
# Instantiate dataloaders.
lowerCamelCase_ = DataLoader(
tokenized_datasets["train"] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ )
lowerCamelCase_ = DataLoader(
tokenized_datasets["validation"] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ )
return train_dataloader, eval_dataloader
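# Editor's note: a quick illustration of why `pad_to_multiple_of` is chosen above —
# tensor cores (fp16/bf16) prefer dims divisible by 8, fp8 kernels by 16, and TPUs
# want a fixed length (128) to avoid recompilation. The padding arithmetic, assuming
# a longest sequence of 57 tokens in the batch:
def padded_length(longest, multiple):
    if multiple is None:
        return longest
    return ((longest + multiple - 1) // multiple) * multiple

assert padded_length(57, 8) == 64      # fp16 / bf16
assert padded_length(57, 16) == 64     # fp8
assert padded_length(57, None) == 57   # fp32: pad only to the longest example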
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__A =mocked_dataloaders # noqa: F811
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowerCamelCase__ ) == "1":
lowerCamelCase_ = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
lowerCamelCase_ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
else:
lowerCamelCase_ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase_ = config["lr"]
lowerCamelCase_ = int(config["num_epochs"] )
lowerCamelCase_ = int(config["seed"] )
lowerCamelCase_ = int(config["batch_size"] )
set_seed(lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ = get_dataloaders(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
lowerCamelCase_ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowerCamelCase_ = batch_size // MAX_GPU_BATCH_SIZE
lowerCamelCase_ = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase_ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowerCamelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCamelCase_ = model.to(accelerator.device )
# Instantiate optimizer
lowerCamelCase_ = AdamW(params=model.parameters() , lr=lowerCamelCase__ )
# Instantiate scheduler
lowerCamelCase_ = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowerCamelCase__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
lowerCamelCase_ = os.path.split(lowerCamelCase__ )[-1].split("." )[0]
accelerator.init_trackers(lowerCamelCase__ , lowerCamelCase__ )
# Now we train the model
for epoch in range(lowerCamelCase__ ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
lowerCamelCase_ = 0
for step, batch in enumerate(lowerCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCamelCase_ = model(**lowerCamelCase__ )
lowerCamelCase_ = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
lowerCamelCase_ = loss / gradient_accumulation_steps
accelerator.backward(lowerCamelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase_ = model(**lowerCamelCase__ )
lowerCamelCase_ = outputs.logits.argmax(dim=-1 )
lowerCamelCase_ , lowerCamelCase_ = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=lowerCamelCase__ , references=lowerCamelCase__ , )
lowerCamelCase_ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , lowerCamelCase__ )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
"accuracy": eval_metric["accuracy"],
"f1": eval_metric["f1"],
"train_loss": total_loss.item() / len(lowerCamelCase__ ),
"epoch": epoch,
} , step=lowerCamelCase__ , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def lowerCamelCase_ ( ):
lowerCamelCase_ = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=lowerCamelCase__ , default=lowerCamelCase__ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
parser.add_argument(
"--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
parser.add_argument(
"--project_dir" , type=lowerCamelCase__ , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , )
lowerCamelCase_ = parser.parse_args()
lowerCamelCase_ = {"lr": 2e-5, "num_epochs": 3, "seed": 4_2, "batch_size": 1_6}
training_function(lowerCamelCase__ , lowerCamelCase__ )
if __name__ == "__main__":
main()
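# Editor's note: a tiny standalone illustration of the gradient-accumulation
# arithmetic used in `training_function` above. When `batch_size` exceeds
# MAX_GPU_BATCH_SIZE, the script splits each step into micro-batches so the
# effective batch size is preserved (values here are just an example):
batch_size = 64
MAX_GPU_BATCH_SIZE = 16
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE    # 4 micro-steps
micro_batch_size = MAX_GPU_BATCH_SIZE                             # 16 examples each
assert micro_batch_size * gradient_accumulation_steps == batch_size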
| 19 | 1 |
from collections.abc import Callable
import numpy as np
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = int(np.ceil((x_end - xa) / step_size ) )
lowerCamelCase_ = np.zeros((n + 1,) )
lowerCamelCase_ = ya
lowerCamelCase_ = xa
for k in range(lowerCamelCase__ ):
lowerCamelCase_ = y[k] + step_size * ode_func(lowerCamelCase__ , y[k] )
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
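# Editor's note: a small usage sketch for the explicit Euler integrator above,
# assuming its (ode_func, y0, x0, step_size, x_end) argument order. Integrating
# y' = y from x=0 with y(0)=1 approximates e^x, and since the method is
# first-order accurate the endpoint error shrinks roughly linearly with the step:
#
#     def f(x, y):
#         return y                      # exact solution: y = e**x
#
#     y = explicit_euler(f, 1.0, 0.0, 0.01, 1.0)
#     abs(y[-1] - math.e)               # ~0.014 with h=0.01, ~0.0014 with h=0.001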
| 19 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
__A =None
__A =logging.get_logger(__name__)
__A ={'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__A ={
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
__A ={
'''facebook/mbart-large-en-ro''': 1_0_2_4,
'''facebook/mbart-large-cc25''': 1_0_2_4,
}
# fmt: off
__A =['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = ['input_ids', 'attention_mask']
lowerCAmelCase__ = MBartTokenizer
lowerCAmelCase__ = []
lowerCAmelCase__ = []
def __init__( self , lowercase=None , lowercase=None , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=None , lowercase=None , lowercase=None , **lowercase , ) -> Dict:
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else mask_token
super().__init__(
vocab_file=lowercase , tokenizer_file=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , src_lang=lowercase , tgt_lang=lowercase , additional_special_tokens=lowercase , **lowercase , )
lowerCamelCase_ = vocab_file
lowerCamelCase_ = False if not self.vocab_file else True
lowerCamelCase_ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
lowerCamelCase_ = {
lang_code: self.convert_tokens_to_ids(lowercase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase_ = src_lang if src_lang is not None else "en_XX"
lowerCamelCase_ = self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def SCREAMING_SNAKE_CASE_( self ) -> str:
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
lowerCamelCase_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , **lowercase ) -> List[Any]:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
lowerCamelCase_ = src_lang
lowerCamelCase_ = self(lowercase , add_special_tokens=lowercase , return_tensors=lowercase , **lowercase )
lowerCamelCase_ = self.convert_tokens_to_ids(lowercase )
lowerCamelCase_ = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = "en_XX" , lowercase = None , lowercase = "ro_RO" , **lowercase , ) -> BatchEncoding:
lowerCamelCase_ = src_lang
lowerCamelCase_ = tgt_lang
return super().prepare_seqaseq_batch(lowercase , lowercase , **lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
lowerCamelCase_ = self.convert_tokens_to_ids(lowercase )
lowerCamelCase_ = []
lowerCamelCase_ = [self.eos_token_id, self.cur_lang_code]
lowerCamelCase_ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase_ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase_ = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
lowerCamelCase_ = self.convert_tokens_to_ids(lowercase )
lowerCamelCase_ = []
lowerCamelCase_ = [self.eos_token_id, self.cur_lang_code]
lowerCamelCase_ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase_ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase_ = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory.' )
return
lowerCamelCase_ = os.path.join(
lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ):
copyfile(self.vocab_file , lowercase )
return (out_vocab_file,)
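# Editor's note: a schematic of the sequence layout the two setters above produce.
# For MBart, prefix_tokens is empty and suffix_tokens is [eos, lang_code], so an
# encoded sequence is simply `tokens + [</s>, lang_code]`. A tiny sketch using
# placeholder ids (not real vocabulary ids):
def build_mbart_sequence_sketch(token_ids, eos_id, lang_code_id):
    prefix_tokens = []                      # MBart uses no prefix
    suffix_tokens = [eos_id, lang_code_id]  # mirrors set_src_lang_special_tokens
    return prefix_tokens + token_ids + suffix_tokens

assert build_mbart_sequence_sketch([17, 23, 5], eos_id=2, lang_code_id=9_999) == [17, 23, 5, 2, 9_999]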
| 19 | 1 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=False , lowercase=True , lowercase=False , lowercase=False , lowercase=19 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.0_2 , lowercase=3 , lowercase=4 , lowercase=None , ) -> List[Any]:
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_input_mask
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=lowercase , esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False} , )
return config
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
lowerCamelCase_ = EsmForProteinFolding(config=lowercase ).float()
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , attention_mask=lowercase )
lowerCamelCase_ = model(lowercase )
lowerCamelCase_ = model(lowercase )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = self.prepare_config_and_inputs()
        lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = False
lowerCAmelCase__ = (EsmForProteinFolding,) if is_torch_available() else ()
lowerCAmelCase__ = ()
lowerCAmelCase__ = {} if is_torch_available() else {}
lowerCAmelCase__ = False
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = EsmFoldModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=lowercase , hidden_size=37 )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
@unittest.skip("Does not support attention outputs" )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
pass
@unittest.skip
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
pass
@unittest.skip("Esm does not support embedding resizing" )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
pass
@unittest.skip("Esm does not support embedding resizing" )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
pass
@unittest.skip("ESMFold does not support passing input embeds!" )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
pass
@unittest.skip("ESMFold does not support head pruning." )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
pass
@unittest.skip("ESMFold does not support head pruning." )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
pass
@unittest.skip("ESMFold does not support head pruning." )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
pass
@unittest.skip("ESMFold does not support head pruning." )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
pass
@unittest.skip("ESMFold does not support head pruning." )
def SCREAMING_SNAKE_CASE_( self ) -> int:
pass
@unittest.skip("ESMFold does not output hidden states in the normal way." )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
pass
@unittest.skip("ESMfold does not output hidden states in the normal way." )
def SCREAMING_SNAKE_CASE_( self ) -> int:
pass
@unittest.skip("ESMFold only has one output format." )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
pass
@unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality" )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
pass
@unittest.skip("ESMFold does not support input chunking." )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
pass
@unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments." )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
pass
@unittest.skip("ESMFold doesn't support data parallel." )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
pass
@require_torch
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1" ).float()
model.eval()
lowerCamelCase_ = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase_ = model(lowercase )["positions"]
lowerCamelCase_ = torch.tensor([2.5_8_2_8, 0.7_9_9_3, -1_0.9_3_3_4] , dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , lowercase , atol=1e-4 ) )
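# Editor's note: a shape-only gloss of the outputs asserted above, under the usual
# AlphaFold-style conventions ESMFold follows (an assumption, not stated in the
# test): `positions` stacks one prediction per structure-module iteration (8 here),
# with xyz coordinates for up to 14 heavy atoms per residue ("atom14" layout), and
# `angles` holds 7 torsion angles per residue encoded as (sin, cos) pairs.
import torch

num_iters, batch, seq_len = 8, 1, 11
positions = torch.zeros(num_iters, batch, seq_len, 14, 3)  # atom14 xyz coordinates
angles = torch.zeros(num_iters, batch, seq_len, 7, 2)      # torsions as (sin, cos)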
| 19 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__A =pytest.mark.integration
@require_faiss
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
        lowerCamelCase_ = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x ) for x in np.arange(30 ).tolist()]} )
return dset
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
lowerCamelCase_ = dset.map(
lambda lowercase , lowercase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=lowercase , keep_in_memory=lowercase )
lowerCamelCase_ = dset.add_faiss_index("vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(lowercase , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
from elasticsearch import Elasticsearch
lowerCamelCase_ = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
lowerCamelCase_ = {"acknowledged": True}
            mocked_bulk.return_value = [(True, None)] * 30
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
lowerCamelCase_ = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=lowercase )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
import faiss
lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
lowerCamelCase_ = np.zeros(5 , dtype=np.floataa )
lowerCamelCase_ = 1
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertRaises(lowercase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
lowerCamelCase_ = np.eye(5 , dtype=np.floataa )[::-1]
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase )
self.assertRaises(lowercase , index.search_batch , queries[0] )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
import faiss
lowerCamelCase_ = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
lowerCamelCase_ = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(lowercase ):
lowerCamelCase_ = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
import faiss
lowerCamelCase_ = faiss.IndexFlat(5 )
lowerCamelCase_ = FaissIndex(custom_index=lowercase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
import faiss
lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase ) as tmp_file:
index.save(tmp_file.name )
lowerCamelCase_ = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase_ = np.zeros(5 , dtype=np.floataa )
lowerCamelCase_ = 1
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def lowerCamelCase_ ( lowerCamelCase__ ):
import faiss
lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
lowerCamelCase_ = "index.faiss"
lowerCamelCase_ = F'mock://{index_name}'
index.save(lowerCamelCase__ , storage_options=mockfs.storage_options )
lowerCamelCase_ = FaissIndex.load(lowerCamelCase__ , storage_options=mockfs.storage_options )
lowerCamelCase_ = np.zeros(5 , dtype=np.floataa )
lowerCamelCase_ = 1
lowerCamelCase_ , lowerCamelCase_ = index.search(lowerCamelCase__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
lowerCamelCase_ = Elasticsearch()
lowerCamelCase_ = {"acknowledged": True}
lowerCamelCase_ = ElasticSearchIndex(es_client=lowercase )
            mocked_bulk.return_value = [(True, None)] * 3
index.add_documents(["foo", "bar", "foobar"] )
# single query
lowerCamelCase_ = "foo"
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
lowerCamelCase_ = "foo"
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
lowerCamelCase_ = ["foo", "bar", "foobar"]
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([1, 1, 1] , lowercase )
# batched queries with timeout
lowerCamelCase_ = ["foo", "bar", "foobar"]
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase , request_timeout=30 )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([1, 1, 1] , lowercase )
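# Editor's note: a compact standalone sketch of the raw faiss calls the FaissIndex
# wrapper tested above builds on — an inner-product flat index over one-hot
# vectors, where querying e_1 returns row 1 with score 1.0 (requires `faiss`).
import faiss
import numpy as np

index = faiss.IndexFlatIP(5)               # flat index, inner-product metric, dim 5
index.add(np.eye(5, dtype=np.float32))     # five one-hot vectors
query = np.zeros((1, 5), dtype=np.float32)
query[0, 1] = 1.0
scores, indices = index.search(query, 1)   # top-1 neighbor
assert indices[0, 0] == 1 and scores[0, 0] == 1.0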
| 19 | 1 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__A ='''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
__A ='''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
__A ='''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE ( datasets.Metric ):
def SCREAMING_SNAKE_CASE_( self ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase = 1 , lowercase = 4 , ) -> Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=lowercase , hypotheses=lowercase , min_len=lowercase , max_len=lowercase )
}
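# Editor's note: a from-scratch sketch of the sentence-level GLEU defined in the
# description above — min(n-gram precision, n-gram recall) over orders 1..4. The
# metric itself defers to nltk's corpus_gleu (which aggregates counts across the
# corpus before dividing); this only illustrates the single-reference definition.
from collections import Counter

def sentence_gleu_sketch(reference, hypothesis, min_len=1, max_len=4):
    def ngrams(tokens, n):
        return Counter(tuple(tokens[i : i + n]) for i in range(len(tokens) - n + 1))
    ref_counts, hyp_counts = Counter(), Counter()
    for n in range(min_len, max_len + 1):
        ref_counts += ngrams(reference, n)
        hyp_counts += ngrams(hypothesis, n)
    overlap = sum((ref_counts & hyp_counts).values())   # matching n-grams
    precision = overlap / max(sum(hyp_counts.values()), 1)
    recall = overlap / max(sum(ref_counts.values()), 1)
    return min(precision, recall)                       # symmetric in hyp/ref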
| 19 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.0_2 , lowercase=3 , lowercase=4 , lowercase=None , ) -> List[str]:
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
lowerCamelCase_ = self.vocab_size - 1
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_token_type_ids:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
lowerCamelCase_ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Dict:
lowerCamelCase_ = OpenAIGPTModel(config=lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , head_mask=lowercase )
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase )
lowerCamelCase_ = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> int:
lowerCamelCase_ = OpenAIGPTLMHeadModel(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Dict:
lowerCamelCase_ = OpenAIGPTDoubleHeadsModel(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> int:
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = OpenAIGPTForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = self.prepare_config_and_inputs()
        lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowerCAmelCase__ = (
{
'feature-extraction': OpenAIGPTModel,
'text-classification': OpenAIGPTForSequenceClassification,
'text-generation': OpenAIGPTLMHeadModel,
'zero-shot': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ) -> Any:
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=torch_device , )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=torch_device , )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ) -> Dict:
        self.model_tester = OpenAIGPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenAIGPTConfig , n_embd=37 )
    def test_config( self ) -> Any:
        self.config_tester.run_common_tests()
    def test_openai_gpt_model( self ) -> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs )
    def test_openai_gpt_lm_head_model( self ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
    def test_openai_gpt_double_lm_head_model( self ) -> Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs )
    def test_openai_gpt_classification_model( self ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ) -> Any:
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
    def test_lm_generate_openai_gpt( self ) -> str:
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" )
        model.to(torch_device )
        input_ids = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=torch_device )  # the president is
        expected_output_ids = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
| 19 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure ={
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_whisper_fast'''] = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_whisper'''] = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_whisper'''] = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_whisper'''] = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
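# Why lazy: importing the package should stay fast even when torch/tf/flax are
# installed. A stripped-down sketch of the idea (illustrative only; the real
# transformers._LazyModule also handles submodule access, caching and __dir__):
#
#     import importlib
#     class _LazySketch:
#         def __init__(self, name, import_structure):
#             self._name = name
#             self._import_structure = import_structure
#         def __getattr__(self, attr):
#             for submodule, symbols in self._import_structure.items():
#                 if attr in symbols:
#                     module = importlib.import_module("." + submodule, self._name)
#                     return getattr(module, attr)
#             raise AttributeError(attr)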
| 19 |
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(1_0)}
def digits_fifth_powers_sum( number ):
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number ) )
def solution( ):
    return sum(
        number
        for number in range(1_0_0_0 , 1_0_0_0_0_0_0 )
        if number == digits_fifth_powers_sum(number ) )
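# Quick sanity check: 4150 is one of the known digit-fifth-power numbers,
# since 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.
assert digits_fifth_powers_sum(4150) == 4150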
if __name__ == "__main__":
print(solution())
| 19 | 1 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.0_2 , num_labels=3 , mask_ratio=0.6 , scope=None , ) -> Tuple:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1) ) )
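        # Worked example with the defaults above: image_size=30 and patch_size=2
        # give (30 // 2) ** 2 = 225 patches; with mask_ratio=0.6 the encoder keeps
        # ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91 tokens (CLS included).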
    def prepare_config_and_inputs( self ) -> int:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ) -> List[str]:
        return ViTMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
    def create_and_check_model( self , config , pixel_values , labels ) -> str:
        model = ViTMAEModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_pretraining( self , config , pixel_values , labels ) -> int:
        model = ViTMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
    def prepare_config_and_inputs_for_common( self ) -> Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , unittest.TestCase ):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
    def setUp( self ) -> Optional[Any]:
        self.model_tester = ViTMAEModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTMAEConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ) -> Union[str, Any]:
        self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
    def test_inputs_embeds( self ) -> str:
        pass
    def test_model_common_attributes( self ) -> Optional[Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ) -> Union[str, Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ) -> Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_pretraining( self ) -> Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def check_pt_tf_models( self , tf_model , pt_model , pt_inputs_dict ) -> Optional[Any]:
        # make masks reproducible
        np.random.seed(2 )
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        pt_noise = torch.from_numpy(noise )
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise
        super().check_pt_tf_models(tf_model , pt_model , pt_inputs_dict )
    def test_save_load( self ) -> Optional[Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2 )
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            out_a = outputs[0].cpu().numpy()
            out_a[np.isnan(out_a )] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname )
                model = model_class.from_pretrained(tmpdirname )
                model.to(torch_device )
                # make random mask reproducible
                torch.manual_seed(2 )
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                # Make sure we don't have nans
                out_b = after_outputs[0].cpu().numpy()
                out_b[np.isnan(out_b )] = 0
                max_diff = np.amax(np.abs(out_a - out_b ) )
                self.assertLessEqual(max_diff , 1e-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def SCREAMING_SNAKE_CASE_( self ) -> str:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def SCREAMING_SNAKE_CASE_( self ) -> int:
pass
@slow
    def test_model_from_pretrained( self ) -> List[Any]:
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
    def default_image_processor( self ) -> Optional[int]:
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
    def test_inference_for_pretraining( self ) -> Tuple:
        # make random mask reproducible across the PT and TF model
        np.random.seed(2 )
        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        noise = np.random.uniform(size=(1, num_patches) )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs , noise=torch.from_numpy(noise ).to(device=torch_device ) )
        # verify the logits
        expected_shape = torch.Size((1, 196, 768) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(torch_device ) , atol=1e-4 ) )
| 19 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A =logging.get_logger(__name__)
def get_yolos_config( yolos_name ):
    config = YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 1_9_2
        config.intermediate_size = 7_6_8
        config.num_hidden_layers = 1_2
        config.num_attention_heads = 3
        config.image_size = [8_0_0, 1_3_3_3]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 3_3_0
        config.num_hidden_layers = 1_4
        config.num_attention_heads = 6
        config.intermediate_size = 1_3_2_0
    elif "yolos_s" in yolos_name:
        config.hidden_size = 3_8_4
        config.intermediate_size = 1_5_3_6
        config.num_hidden_layers = 1_2
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [8_0_0, 1_3_4_4]
    config.num_labels = 9_1
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def read_in_q_k_v( state_dict , config , base_model = False ):
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
        in_proj_bias = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'vit.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[: config.hidden_size, :]
        state_dict[F'vit.encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[F'vit.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'vit.encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'vit.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[-config.hidden_size :, :]
        state_dict[F'vit.encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
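# Worked example of the split above: for "yolos_s" (hidden_size=384) the fused qkv
# weight has shape (3 * 384, 384) = (1152, 384); rows 0..383 become the query
# projection, rows 384..767 the key projection, and rows 768..1151 the value.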
def rename_key( name ):
    if "backbone" in name:
        name = name.replace("backbone" , "vit" )
    if "cls_token" in name:
        name = name.replace("cls_token" , "embeddings.cls_token" )
    if "det_token" in name:
        name = name.replace("det_token" , "embeddings.detection_tokens" )
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
    if "pos_embed" in name:
        name = name.replace("pos_embed" , "embeddings.position_embeddings" )
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    if "blocks" in name:
        name = name.replace("blocks" , "encoder.layer" )
    if "attn.proj" in name:
        name = name.replace("attn.proj" , "attention.output.dense" )
    if "attn" in name:
        name = name.replace("attn" , "attention.self" )
    if "norm1" in name:
        name = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        name = name.replace("norm2" , "layernorm_after" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" , "output.dense" )
    if "class_embed" in name:
        name = name.replace("class_embed" , "class_labels_classifier" )
    if "bbox_embed" in name:
        name = name.replace("bbox_embed" , "bbox_predictor" )
    if "vit.norm" in name:
        name = name.replace("vit.norm" , "vit.layernorm" )
    return name
def convert_state_dict( orig_state_dict , model ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split("." )
            layer_num = int(key_split[2] )
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[F'vit.encoder.layer.{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                orig_state_dict[F'vit.encoder.layer.{layer_num}.attention.attention.key.weight'] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[F'vit.encoder.layer.{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[F'vit.encoder.layer.{layer_num}.attention.attention.query.bias'] = val[:dim]
                orig_state_dict[F'vit.encoder.layer.{layer_num}.attention.attention.key.bias'] = val[dim : dim * 2]
                orig_state_dict[F'vit.encoder.layer.{layer_num}.attention.attention.value.bias'] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def prepare_img( ):
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_yolos_checkpoint( yolos_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub = False ):
    config = get_yolos_config(yolos_name )
    # load original state_dict
    state_dict = torch.load(checkpoint_path , map_location="cpu" )["model"]
    # load 🤗 model
    model = YolosForObjectDetection(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # Check outputs on an image, prepared by YolosImageProcessor
    size = 8_0_0 if yolos_name != "yolos_ti" else 5_1_2
    image_processor = YolosImageProcessor(format="coco_detection" , size=size )
    encoding = image_processor(images=prepare_img() , return_tensors="pt" )
    outputs = model(**encoding )
    logits , pred_boxes = outputs.logits, outputs.pred_boxes
    expected_slice_logits , expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]] )
        expected_slice_boxes = torch.tensor(
            [[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]] )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] )
        expected_slice_boxes = torch.tensor(
            [[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]] )
        expected_slice_boxes = torch.tensor(
            [[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]] )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]] )
        expected_slice_boxes = torch.tensor(
            [[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]] )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]] )
        expected_slice_boxes = torch.tensor(
            [[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]] )
    else:
        raise ValueError(F'Unknown yolos_name: {yolos_name}' )
    assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1e-4 )
    assert torch.allclose(pred_boxes[0, :3, :3] , expected_slice_boxes , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'Saving model {yolos_name} to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub..." )
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name , organization="hustvl" )
        model.push_to_hub(model_name , organization="hustvl" )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__A =parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 19 | 1 |
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory( _ ):
    return EnvironmentCommand()
def download_command_factory( args ):
    return EnvironmentCommand(args.accelerate_config_file )
class EnvironmentCommand(BaseTransformersCLICommand ):
@staticmethod
    def register_subcommand( parser ) -> str:
        download_parser = parser.add_parser("env" )
        download_parser.set_defaults(func=info_command_factory )
        download_parser.add_argument(
            "--accelerate-config_file" , default=None , help="The accelerate config file to use for the default values in the launching script." , )
        download_parser.set_defaults(func=download_command_factory )
    def __init__( self , accelerate_config_file , *args ) -> None:
        self._accelerate_config_file = accelerate_config_file
    def run( self ) -> Optional[int]:
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors" ) is not None:
            import safetensors

            safetensors_version = f'{safetensors.__version__} but is ignored because of PyTorch version too old.'
        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file ):
                accelerate_config = load_config_from_file(self._accelerate_config_file ).to_dict()
            accelerate_config_str = (
                "\n".join([f'\t- {prop}: {val}' for prop, val in accelerate_config.items()] )
                if isinstance(accelerate_config , dict )
                else f'\t{accelerate_config}'
            )
        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU" ) )
        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform
        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f'{safetensors_version}',
            "Accelerate version": f'{accelerate_version}',
            "Accelerate config": f'{accelerate_config_str}',
            "PyTorch version (GPU?)": f'{pt_version} ({pt_cuda_available})',
            "Tensorflow version (GPU?)": f'{tf_version} ({tf_cuda_available})',
            "Flax version (CPU?/GPU?/TPU?)": f'{flax_version} ({jax_backend})',
            "Jax version": f'{jax_version}',
            "JaxLib version": f'{jaxlib_version}',
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }
        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
        print(self.format_dict(info ) )
        return info
@staticmethod
    def format_dict( d ) -> List[Any]:
        return "\n".join([f'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
| 19 |
def binomial_coefficient( n , r ):
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
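# The rolling-row trick above keeps a single row of Pascal's triangle and updates
# it from high j to low j, so each c[j] still sees the previous row's c[j - 1];
# memory is O(r) instead of O(n * r).
assert binomial_coefficient(n=5, r=2) == 10  # 5! / (2! * 3!)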
print(binomial_coefficient(n=1_0, r=5))
| 19 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description ='''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'''
def get_user_input( ):
    compute_environment = _ask_options(
        "In which compute environment are you running?" , ["This machine", "AWS (Amazon SageMaker)"] , _convert_compute_environment , )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser( subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser("config" , description=description )
    else:
        parser = argparse.ArgumentParser("Accelerate config command" , description=description )
    parser.add_argument(
        "--config_file" , default=None , help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=config_command )
    return parser
def config_command( args ):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        config_file = default_yaml_config_file
    if config_file.endswith(".json" ):
        config.to_json_file(config_file )
    else:
        config.to_yaml_file(config_file )
    print(F'accelerate configuration saved at {config_file}' )
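# Typical invocations of the command wired up above (assuming the standard
# `accelerate` console entry point is installed):
#
#     accelerate config                          # interactive prompts, default path
#     accelerate config --config_file my.yaml    # save the answers to my.yaml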
def main( ):
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args )
config_command(lowerCamelCase__ )
if __name__ == "__main__":
main()
| 19 |
import math
def res( x , y ):
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x )
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen" )
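# Worked example: to compare 2**10 with 10**3, res(2, 10) = 10 * log10(2) ~ 3.0103
# while res(10, 3) = 3 * log10(10) = 3.0, so 2**10 (= 1024) is the larger number.
assert res(2, 10) > res(10, 3)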
if __name__ == "__main__": # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = '''Enter the base and the power separated by a comma: '''
    xa, ya = map(int, input(prompt).split(''','''))
    xb, yb = map(int, input(prompt).split(''','''))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    resa = res(xa, ya)
    resb = res(xb, yb)
    # We check for the largest number
    if resa > resb:
        print('''Largest number is''', xa, '''^''', ya)
    elif resb > resa:
        print('''Largest number is''', xb, '''^''', yb)
    else:
        print('''Both are equal''')
| 19 | 1 |
def equation( x ):
    return 1_0 - x * x
def bisection( a , b ):
    # Bolzano theorem in order to find if there is a root between a and b
    if equation(a ) * equation(b ) >= 0:
        raise ValueError("Wrong space!" )
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c ) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c ) * equation(a ) < 0:
            b = c
        else:
            a = c
    return c
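# Convergence note: each pass halves the bracket, so reaching the 0.01 tolerance
# from an interval of width w takes about ceil(log2(w / 0.01)) passes; for
# bisection(-2, 5) that is ceil(log2(700)) = 10 iterations.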
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 19 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
__A =logging.get_logger(__name__)
MODEL_CONFIG_CLASSES =list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES =tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None , metadata={'help': 'Model type selected in the list: ' + ', '.join(MODEL_TYPES )} )
    data_dir: str = field(
        default=None , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
    max_seq_length: int = field(
        default=1_28 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    doc_stride: int = field(
        default=1_28 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
    max_query_length: int = field(
        default=64 , metadata={
            'help': (
                'The maximum number of tokens for the question. Questions longer than this will '
                'be truncated to this length.'
            )
        } , )
    max_answer_length: int = field(
        default=30 , metadata={
            'help': (
                'The maximum length of an answer that can be generated. This is needed because the start '
                'and end predictions are not conditioned on one another.'
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
    version_2_with_negative: bool = field(
        default=False , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
    null_score_diff_threshold: float = field(
        default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
    n_best_size: int = field(
        default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
    lang_id: int = field(
        default=0 , metadata={
            'help': (
                'language id of input for language-specific xlm models (see'
                ' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
            )
        } , )
    threads: int = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class Split(Enum ):
    train = 'train'
    dev = 'dev'
class SquadDataset(Dataset ):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__( self , args , tokenizer , limit_length = None , mode = Split.train , is_language_sensitive = False , cache_dir = None , dataset_format = "pt" , ) -> List[str]:
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadVaProcessor()
        if isinstance(mode , str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name" )
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file )
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset" , None )
                self.examples = self.old_features.get("examples" , None )
                logger.info(
                    f'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
                        " future run" )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir )
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir )
                self.features , self.dataset = squad_convert_examples_to_features(
                    examples=self.examples , tokenizer=tokenizer , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=dataset_format , )
                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples} , cached_features_file , )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ) -> Tuple:
return len(self.features )
    def __getitem__( self , i ) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids , dtype=torch.long )
        attention_mask = torch.tensor(feature.attention_mask , dtype=torch.long )
        token_type_ids = torch.tensor(feature.token_type_ids , dtype=torch.long )
        cls_index = torch.tensor(feature.cls_index , dtype=torch.long )
        p_mask = torch.tensor(feature.p_mask , dtype=torch.float )
        is_impossible = torch.tensor(feature.is_impossible , dtype=torch.float )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
        if self.args.version_2_with_negative:
            inputs.update({"is_impossible": is_impossible} )
        if self.is_language_sensitive:
            inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.int64 ) * self.args.lang_id)} )
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position , dtype=torch.long )
            end_positions = torch.tensor(feature.end_position , dtype=torch.long )
            inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
        return inputs
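# The FileLock idiom used in __init__ above, reduced to its core (a sketch with a
# hypothetical `build()` and cache path, not part of this module):
#
#     with FileLock(cache_path + ".lock"):
#         if os.path.exists(cache_path) and not overwrite:
#             features = torch.load(cache_path)
#         else:
#             features = build()
#             torch.save(features, cache_path)
#
# Only the first process pays for feature conversion; later processes block on the
# lock and then read the cached file.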
| 19 | 1 |
import os
import jsonlines
import numpy as np
from tqdm import tqdm
__A =2_0_4_8
__A =4_0_9_6
__A =4_2
__A =os.environ.pop('''PROCESS_TRAIN''', '''false''')
__A ={'''null''': 0, '''short''': 1, '''long''': 2, '''yes''': 3, '''no''': 4}
def lowerCamelCase_ ( lowerCamelCase__ ):
def choose_first(lowerCamelCase__ , lowerCamelCase__=False ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
if len(lowerCamelCase__ ) == 1:
lowerCamelCase_ = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
lowerCamelCase_ = {k: [a[k]] for k in a}
if len(a["start_token"] ) > 0:
break
return a
lowerCamelCase_ = {"id": example["id"]}
lowerCamelCase_ = example["annotations"]
lowerCamelCase_ = annotation["yes_no_answer"]
if 0 in yes_no_answer or 1 in yes_no_answer:
lowerCamelCase_ = ["yes"] if 1 in yes_no_answer else ["no"]
lowerCamelCase_ = lowerCamelCase_ = []
lowerCamelCase_ = lowerCamelCase_ = []
lowerCamelCase_ = ["<cls>"]
else:
lowerCamelCase_ = ["short"]
lowerCamelCase_ = choose_first(annotation["short_answers"] )
if len(out["start_token"] ) == 0:
# answer will be long if short is not available
lowerCamelCase_ = ["long"]
lowerCamelCase_ = choose_first(annotation["long_answer"] , is_long_answer=lowerCamelCase__ )
lowerCamelCase_ = []
answer.update(lowerCamelCase__ )
# disregard some samples
if len(answer["start_token"] ) > 1 or answer["start_token"] == answer["end_token"]:
lowerCamelCase_ = True
else:
lowerCamelCase_ = False
lowerCamelCase_ = ["start_token", "end_token", "start_byte", "end_byte", "text"]
if not all(isinstance(answer[k] , lowerCamelCase__ ) for k in cols ):
raise ValueError("Issue in ID" , example["id"] )
return answer
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__=False ):
lowerCamelCase_ = _get_single_answer(lowerCamelCase__ )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
lowerCamelCase_ = example["document"]["tokens"]
lowerCamelCase_ = []
for i in range(len(doc["token"] ) ):
if not doc["is_html"][i]:
context.append(doc["token"][i] )
return {
"context": " ".join(lowerCamelCase__ ),
"answer": {
"start_token": -1_0_0, # ignore index in cross-entropy
"end_token": -1_0_0, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
lowerCamelCase_ = ["start_token", "end_token"]
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
lowerCamelCase_ = example["document"]["tokens"]
lowerCamelCase_ = answer["start_token"]
lowerCamelCase_ = answer["end_token"]
lowerCamelCase_ = []
for i in range(len(doc["token"] ) ):
if not doc["is_html"][i]:
context.append(doc["token"][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
lowerCamelCase_ = " ".join(context[start_token:end_token] )
# checking above code
if assertion:
lowerCamelCase_ = doc["is_html"][answer["start_token"] : answer["end_token"]]
lowerCamelCase_ = doc["token"][answer["start_token"] : answer["end_token"]]
lowerCamelCase_ = " ".join([old[i] for i in range(len(lowerCamelCase__ ) ) if not is_html[i]] )
if new != old:
print("ID:" , example["id"] )
print("New:" , lowerCamelCase__ , end="\n" )
print("Old:" , lowerCamelCase__ , end="\n\n" )
return {
"context": " ".join(lowerCamelCase__ ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=2_0_4_8 , lowerCamelCase__=4_0_9_6 , lowerCamelCase__=True ):
# overlap will be of doc_stride - q_len
lowerCamelCase_ = get_context_and_ans(lowerCamelCase__ , assertion=lowerCamelCase__ )
lowerCamelCase_ = out["answer"]
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
lowerCamelCase_ = tokenizer(example["question"]["text"] , out["context"] ).input_ids
lowerCamelCase_ = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
lowerCamelCase_ = []
lowerCamelCase_ = []
lowerCamelCase_ = input_ids[:q_len]
lowerCamelCase_ = range(lowerCamelCase__ , len(lowerCamelCase__ ) , max_length - doc_stride )
for i in doc_start_indices:
lowerCamelCase_ = i + max_length - q_len
lowerCamelCase_ = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer["category"][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-1_0_0] * len(lowerCamelCase__ ),
"end_token": [-1_0_0] * len(lowerCamelCase__ ),
"category": category,
},
}
lowerCamelCase_ = out["context"].split()
lowerCamelCase_ = splitted_context[answer["end_token"]]
lowerCamelCase_ = len(
tokenizer(
" ".join(splitted_context[: answer["start_token"]] ) , add_special_tokens=lowerCamelCase__ , ).input_ids )
lowerCamelCase_ = len(
tokenizer(" ".join(splitted_context[: answer["end_token"]] ) , add_special_tokens=lowerCamelCase__ ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
lowerCamelCase_ = len(tokenizer(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
lowerCamelCase_ = input_ids[answer["start_token"] : answer["end_token"] + 1] # right & left are inclusive
lowerCamelCase_ = answer["start_token"]
lowerCamelCase_ = answer["end_token"]
if assertion:
lowerCamelCase_ = tokenizer.decode(lowerCamelCase__ )
if answer["span"] != new:
print("ISSUE IN TOKENIZATION" )
print("OLD:" , answer["span"] )
print("NEW:" , lowerCamelCase__ , end="\n\n" )
if len(lowerCamelCase__ ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
lowerCamelCase_ = input_ids[:q_len]
lowerCamelCase_ = range(lowerCamelCase__ , len(lowerCamelCase__ ) , max_length - doc_stride )
lowerCamelCase_ = []
lowerCamelCase_ = []
lowerCamelCase_ = []
lowerCamelCase_ = [] # null, yes, no, long, short
for i in doc_start_indices:
lowerCamelCase_ = i + max_length - q_len
lowerCamelCase_ = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
lowerCamelCase_ = start_token - i + q_len
lowerCamelCase_ = end_token - i + q_len
answers_category.append(answer["category"][0] ) # ["short"] -> "short"
else:
lowerCamelCase_ = -1_0_0
lowerCamelCase_ = -1_0_0
answers_category.append("null" )
lowerCamelCase_ = inputs[-1][start_token : end_token + 1]
answers_start_token.append(lowerCamelCase__ )
answers_end_token.append(lowerCamelCase__ )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print("ISSUE in strided for ID:" , example["id"] )
print("New:" , tokenizer.decode(lowerCamelCase__ ) )
print("Old:" , tokenizer.decode(lowerCamelCase__ ) , end="\n\n" )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
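# Worked example of the striding above: with max_length=4096, doc_stride=2048 and a
# 16-token question, windows start at range(16, len(input_ids), 4096 - 2048); each
# window holds 4096 - 16 = 4080 document tokens, so consecutive windows overlap by
# 4080 - 2048 = 2032 tokens and most answer spans fall wholly inside some window.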
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=2_0_4_8 , lowerCamelCase__=4_0_9_6 , lowerCamelCase__=False ):
lowerCamelCase_ = get_strided_contexts_and_ans(
lowerCamelCase__ , lowerCamelCase__ , doc_stride=lowerCamelCase__ , max_length=lowerCamelCase__ , assertion=lowerCamelCase__ , )
return example
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
with jsonlines.open(lowerCamelCase__ , "a" ) as writer:
for example in tqdm(lowerCamelCase__ , total=len(lowerCamelCase__ ) , desc="Saving samples ... " ):
lowerCamelCase_ = example["labels"]
for ids, start, end, cat in zip(
example["input_ids"] , labels["start_token"] , labels["end_token"] , labels["category"] , ):
if start == -1 and end == -1:
continue # leave waste samples with no answer
if cat == "null" and np.random.rand() < 0.6:
continue # removing 50 % samples
writer.write(
{
"input_ids": ids,
"start_token": start,
"end_token": end,
"category": CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
__A =load_dataset('''natural_questions''')
__A =BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
__A =data['''train''' if PROCESS_TRAIN == '''true''' else '''validation''']
__A ={
'''tokenizer''': tokenizer,
'''doc_stride''': DOC_STRIDE,
'''max_length''': MAX_LENGTH,
'''assertion''': False,
}
__A =data.map(prepare_inputs, fn_kwargs=fn_kwargs)
__A =data.remove_columns(['''annotations''', '''document''', '''id''', '''question'''])
print(data)
np.random.seed(SEED)
__A ='''nq-training.jsonl''' if PROCESS_TRAIN == '''true''' else '''nq-validation.jsonl'''
save_to_disk(data, file_name=cache_file_name)
| 19 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC ):
    @staticmethod
    @abstractmethod
    def register_subcommand( parser: ArgumentParser ):
        raise NotImplementedError()
    @abstractmethod
    def run( self ):
        raise NotImplementedError()
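# A minimal concrete command following the contract above (names are illustrative,
# not an actual transformers command):
#
#     class HelloCommand(BaseTransformersCLICommand):
#         @staticmethod
#         def register_subcommand(parser):
#             hello_parser = parser.add_parser("hello")
#             hello_parser.set_defaults(func=lambda args: HelloCommand())
#         def run(self):
#             print("hello")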
| 19 | 1 |
import math
class SelfOrganizingMap:
    def get_winner( self , weights , sample ):
        # Compute squared distances to both prototype vectors; the winner is the
        # prototype with the smaller distance.
        da = 0.0
        db = 0.0
        for i in range(len(sample ) ):
            da += math.pow((sample[i] - weights[0][i]) , 2 )
            db += math.pow((sample[i] - weights[1][i]) , 2 )
        return 1 if da > db else 0
    def update( self , weights , sample , j , alpha ):
        for i in range(len(sample ) ):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
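# The update above is the standard competitive-learning rule
# w_j <- w_j + alpha * (x - w_j): with alpha = 0.5, a weight of 0.2 facing a sample
# component of 1 moves to 0.2 + 0.5 * (1 - 0.2) = 0.6, halving the remaining gap.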
def main( ):
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs ):
        for j in range(len(training_samples ) ):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights , sample )
            # Update the winning vector
            weights = self_organizing_map.update(weights , sample , winner , alpha )
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights , sample )
    # results
    print(F'Clusters that the test sample belongs to : {winner}' )
    print(F'Weights that have been trained : {weights}' )
# running the main() function
if __name__ == "__main__":
main()
| 19 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
__A =logging.get_logger(__name__) # pylint: disable=invalid-name
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> List[Any]:
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
lowerCamelCase_ = (
f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'
f' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , lowercase , standard_warn=lowercase )
lowerCamelCase_ = dict(scheduler.config )
lowerCamelCase_ = 1
lowerCamelCase_ = FrozenDict(lowercase )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
lowerCamelCase_ = (
f'The configuration file of this scheduler: {scheduler} has not set the configuration'
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , lowercase , standard_warn=lowercase )
lowerCamelCase_ = dict(scheduler.config )
lowerCamelCase_ = True
lowerCamelCase_ = FrozenDict(lowercase )
if safety_checker is None:
logger.warning(
f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=lowercase , segmentation_processor=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , unet=lowercase , scheduler=lowercase , safety_checker=lowercase , feature_extractor=lowercase , )
def SCREAMING_SNAKE_CASE_( self , lowercase = "auto" ) -> Tuple:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
self.enable_attention_slicing(lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> str:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
        device = torch.device("cuda" )
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , lowercase , lowercase , lowercase , lowercase = 512 , lowercase = 512 , lowercase = 50 , lowercase = 7.5 , lowercase = None , lowercase = 1 , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = None , lowercase = 1 , **lowercase , ) -> int:
lowerCamelCase_ = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
lowerCamelCase_ = self.segmentation_model(**lowercase )
lowerCamelCase_ = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
lowerCamelCase_ = self.numpy_to_pil(lowercase )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
lowerCamelCase_ = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=lowercase , image=lowercase , mask_image=lowercase , height=lowercase , width=lowercase , num_inference_steps=lowercase , guidance_scale=lowercase , negative_prompt=lowercase , num_images_per_prompt=lowercase , eta=lowercase , generator=lowercase , latents=lowercase , output_type=lowercase , return_dict=lowercase , callback=lowercase , callback_steps=lowercase , )
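# Usage sketch (added for illustration; not part of the original file). This class
# mirrors the diffusers "text_inpainting" community pipeline, so loading it would
# look roughly like the following -- the custom_pipeline name and checkpoint ids
# are assumptions, not confirmed by this file:
#
#   from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
#   from diffusers import DiffusionPipeline
#
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-inpainting",
#       custom_pipeline="text_inpainting",
#       segmentation_model=model,
#       segmentation_processor=processor,
#   )
#   result = pipe(text="a glass", image=init_image, prompt="a cup of coffee").images[0]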
| 19 | 1 |
from collections import deque
class Process:
def __init__( self , lowercase , lowercase , lowercase ) -> None:
lowerCamelCase_ = process_name # process name
lowerCamelCase_ = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
lowerCamelCase_ = arrival_time
lowerCamelCase_ = burst_time # remaining burst time
lowerCamelCase_ = 0 # total time of the process wait in ready queue
lowerCamelCase_ = 0 # time from arrival time to completion time
class MLFQ:
def __init__( self , lowercase , lowercase , lowercase , lowercase , ) -> None:
# total number of mlfq's queues
lowerCamelCase_ = number_of_queues
# time slice of queues that round robin algorithm applied
lowerCamelCase_ = time_slices
# unfinished process is in this ready_queue
lowerCamelCase_ = queue
# current time
lowerCamelCase_ = current_time
# finished process is in this sequence queue
lowerCamelCase_ = deque()
def SCREAMING_SNAKE_CASE_( self ) -> list[str]:
lowerCamelCase_ = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> list[int]:
lowerCamelCase_ = []
for i in range(len(lowercase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> list[int]:
lowerCamelCase_ = []
for i in range(len(lowercase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> list[int]:
lowerCamelCase_ = []
for i in range(len(lowercase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> list[int]:
return [q.burst_time for q in queue]
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> int:
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> deque[Process]:
lowerCamelCase_ = deque() # sequence deque of finished process
while len(lowercase ) != 0:
lowerCamelCase_ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
# update waiting time of current process
self.update_waiting_time(lowercase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
lowerCamelCase_ = 0
# set the process's turnaround time because it is finished
lowerCamelCase_ = self.current_time - cp.arrival_time
# set the completion time
lowerCamelCase_ = self.current_time
# add the process to queue that has finished queue
finished.append(lowercase )
self.finish_queue.extend(lowercase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> tuple[deque[Process], deque[Process]]:
lowerCamelCase_ = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(lowercase ) ):
lowerCamelCase_ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(lowercase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
lowerCamelCase_ = self.current_time
                # put the process back at the end of the queue because it is not finished
ready_queue.append(lowercase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
lowerCamelCase_ = 0
# set the finish time
lowerCamelCase_ = self.current_time
# update the process' turnaround time because it is finished
lowerCamelCase_ = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(lowercase )
self.finish_queue.extend(lowercase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def SCREAMING_SNAKE_CASE_( self ) -> deque[Process]:
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1 ):
lowerCamelCase_ , lowerCamelCase_ = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
    import doctest

    P1 = Process('''P1''', 0, 5_3)
    P2 = Process('''P2''', 0, 1_7)
    P3 = Process('''P3''', 0, 6_8)
    P4 = Process('''P4''', 0, 2_4)
    number_of_queues = 3
    time_slices = [1_7, 2_5]
    queue = deque([P1, P2, P3, P4])
    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)
    doctest.testmod(extraglobs={'''queue''': deque([P1, P2, P3, P4])})
    P1 = Process('''P1''', 0, 5_3)
    P2 = Process('''P2''', 0, 1_7)
    P3 = Process('''P3''', 0, 6_8)
    P4 = Process('''P4''', 0, 2_4)
    number_of_queues = 3
    time_slices = [1_7, 2_5]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()
    # print total waiting times of processes(P1, P2, P3, P4)
    print(
        F"""waiting time:\
        \t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        F"""completion time:\
        \t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        F"""turnaround time:\
        \t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print sequence of finished processes
    print(
        F"""sequence of finished processes:\
        {mlfq.calculate_sequence_of_finish_queue()}"""
    )
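    # Worked schedule for the four processes above (derived by hand from the
    # algorithm; added as a reference): Q0 (RR, slice 17) finishes P2 at t=34,
    # Q1 (RR, slice 25) finishes P4 at t=125, and the final FCFS queue finishes
    # P1 at t=136 and P3 at t=162. So the finish sequence is
    # ['P2', 'P4', 'P1', 'P3'], the waiting times are [83, 17, 94, 101] and the
    # completion/turnaround times are [136, 34, 162, 125] for P1..P4.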
| 19 |
from collections import deque
def tarjan(g):
    # Find the strongly connected components of a directed graph given as an
    # adjacency list; components come out in reverse topological order.
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]
    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True
        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index
    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)
    return components
def create_graph(n, edges):
    # Build an adjacency list for a directed graph with `n` vertices.
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
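    # An extra sanity check (added; not part of the original test): a two-node
    # cycle plus an isolated sink yields exactly two components.
    assert [[2], [1, 0]] == tarjan(create_graph(3, [(0, 1), (1, 0), (1, 2)]))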
| 19 | 1 |
from sklearn.metrics import matthews_corrcoef
import datasets
__A ='''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
__A ='''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
__A ='''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE ( datasets.Metric ):
def SCREAMING_SNAKE_CASE_( self ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
] , )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase=None ) -> Optional[Any]:
return {
"matthews_correlation": float(matthews_corrcoef(lowercase , lowercase , sample_weight=lowercase ) ),
}
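if __name__ == "__main__":
    # Minimal check (added for illustration): the metric above is a thin wrapper,
    # so the docstring's first example can be reproduced with scikit-learn directly.
    print(round(matthews_corrcoef([1, 3, 2, 0, 3, 2], [1, 2, 2, 0, 3, 3]), 2))  # 0.54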
| 19 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A ={'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure)
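# Behavior note (added): with the lazy structure above, importing the package stays
# cheap -- a name such as `XGLMConfig` only triggers the import of its submodule on
# first access, and the torch/TF/Flax modeling branches are never imported unless
# the matching backend is installed.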
| 19 | 1 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
@staticmethod
@abstractmethod
def SCREAMING_SNAKE_CASE_( lowercase ) -> int:
raise NotImplementedError()
@abstractmethod
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
raise NotImplementedError()
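# Note (added): concrete CLI commands subclass this ABC by implementing both hooks.
# In the upstream library they are named `register_subcommand(parser)` (the static
# hook that wires up CLI arguments) and `run()` (the instance hook that does the
# work); those names are recalled from upstream and are not visible in this copy.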
| 19 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A ={'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 19 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_( self ) -> int:
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = BlipImageProcessor()
lowerCamelCase_ = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
lowerCamelCase_ = BlipaProcessor(lowercase , lowercase )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_( self , **lowercase ) -> Union[str, Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).tokenizer
def SCREAMING_SNAKE_CASE_( self , **lowercase ) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).image_processor
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
lowerCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase_ = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowerCamelCase_ = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 )
lowerCamelCase_ = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BlipaProcessor(tokenizer=lowercase , image_processor=lowercase )
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = image_processor(lowercase , return_tensors="np" )
lowerCamelCase_ = processor(images=lowercase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BlipaProcessor(tokenizer=lowercase , image_processor=lowercase )
lowerCamelCase_ = "lower newer"
lowerCamelCase_ = processor(text=lowercase )
lowerCamelCase_ = tokenizer(lowercase , return_token_type_ids=lowercase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BlipaProcessor(tokenizer=lowercase , image_processor=lowercase )
lowerCamelCase_ = "lower newer"
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=lowercase , images=lowercase )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(lowercase ):
processor()
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BlipaProcessor(tokenizer=lowercase , image_processor=lowercase )
lowerCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ = processor.batch_decode(lowercase )
lowerCamelCase_ = tokenizer.batch_decode(lowercase )
self.assertListEqual(lowercase , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BlipaProcessor(tokenizer=lowercase , image_processor=lowercase )
lowerCamelCase_ = "lower newer"
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=lowercase , images=lowercase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 19 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__A ='''2.13.1'''
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 19 | 1 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
__A =TypeVar('''T''')
def get_parent_position(position):
    return (position - 1) // 2
def get_child_left_position(position):
    return (2 * position) + 1
def get_child_right_position(position):
    return (2 * position) + 2
class MinPriorityQueue(Generic[T] ):
def __init__( self ) -> None:
lowerCamelCase_ = []
lowerCamelCase_ = {}
lowerCamelCase_ = 0
def __len__( self ) -> int:
return self.elements
def __repr__( self ) -> str:
return str(self.heap )
def SCREAMING_SNAKE_CASE_( self ) -> bool:
# Check if the priority queue is empty
return self.elements == 0
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> None:
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
lowerCamelCase_ = self.elements
self.elements += 1
self._bubble_up(lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> T:
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
lowerCamelCase_ , lowerCamelCase_ = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
lowerCamelCase_ , lowerCamelCase_ = self.heap[0]
self._bubble_down(lowercase )
return elem
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> None:
# Update the weight of the given key
lowerCamelCase_ = self.position_map[elem]
lowerCamelCase_ = (elem, weight)
if position > 0:
lowerCamelCase_ = get_parent_position(lowercase )
lowerCamelCase_ , lowerCamelCase_ = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(lowercase )
else:
self._bubble_down(lowercase )
else:
self._bubble_down(lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
# Place a node at the proper position (upward movement) [to be used internally
# only]
lowerCamelCase_ = self.position_map[elem]
if curr_pos == 0:
return None
lowerCamelCase_ = get_parent_position(lowercase )
lowerCamelCase_ , lowerCamelCase_ = self.heap[curr_pos]
lowerCamelCase_ , lowerCamelCase_ = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(lowercase , lowercase )
return self._bubble_up(lowercase )
return None
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
# Place a node at the proper position (downward movement) [to be used
# internally only]
lowerCamelCase_ = self.position_map[elem]
lowerCamelCase_ , lowerCamelCase_ = self.heap[curr_pos]
lowerCamelCase_ = get_child_left_position(lowercase )
lowerCamelCase_ = get_child_right_position(lowercase )
if child_left_position < self.elements and child_right_position < self.elements:
lowerCamelCase_ , lowerCamelCase_ = self.heap[child_left_position]
lowerCamelCase_ , lowerCamelCase_ = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(lowercase , lowercase )
return self._bubble_down(lowercase )
if child_left_position < self.elements:
lowerCamelCase_ , lowerCamelCase_ = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(lowercase , lowercase )
return self._bubble_down(lowercase )
else:
return None
if child_right_position < self.elements:
lowerCamelCase_ , lowerCamelCase_ = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(lowercase , lowercase )
return self._bubble_down(lowercase )
return None
    def _swap_nodes(self , node1_pos , node2_pos ) -> None:
        # Swap the nodes at the given positions and keep position_map consistent
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T] ):
def __init__( self ) -> None:
lowerCamelCase_ = {}
lowerCamelCase_ = 0
def __repr__( self ) -> str:
return str(self.connections )
def __len__( self ) -> int:
return self.nodes
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
# Add a node in the graph if it is not in the graph
if node not in self.connections:
lowerCamelCase_ = {}
self.nodes += 1
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase ) -> None:
# Add an edge between 2 nodes in the graph
self.add_node(lowercase )
self.add_node(lowercase )
lowerCamelCase_ = weight
lowerCamelCase_ = weight
def prims_algo(graph ):
lowerCamelCase_ = {node: maxsize for node in graph.connections}
lowerCamelCase_ = {node: None for node in graph.connections}
lowerCamelCase_ = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(lowerCamelCase__ , lowerCamelCase__ )
if priority_queue.is_empty():
return dist, parent
# initialization
lowerCamelCase_ = priority_queue.extract_min()
lowerCamelCase_ = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
lowerCamelCase_ = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowerCamelCase__ , dist[neighbour] )
lowerCamelCase_ = node
# running prim's algorithm
while not priority_queue.is_empty():
lowerCamelCase_ = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
lowerCamelCase_ = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowerCamelCase__ , dist[neighbour] )
lowerCamelCase_ = node
return dist, parent
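# Usage sketch (added; relies on the names restored above plus `add_edge`, which is
# assumed from its comment):
#
#   g = GraphUndirectedWeighted()
#   g.add_edge(1, 2, 1)
#   g.add_edge(2, 3, 2)
#   dist, parent = prims_algo(g)
#   # dist == {1: 0, 2: 1, 3: 3} and parent == {1: None, 2: 1, 3: 2}
#
# Note that the update rule compares `dist[node] + edge_weight`, so `dist` holds
# cumulative distances from the start node rather than single edge weights.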
| 19 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__A ={
'''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''BloomTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 19 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
__A =random.Random()
if is_torch_available():
import torch
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__=1.0 , lowerCamelCase__=None , lowerCamelCase__=None ):
if rng is None:
lowerCamelCase_ = global_rng
lowerCamelCase_ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self , lowercase , lowercase=7 , lowercase=400 , lowercase=2000 , lowercase=1 , lowercase=0.0 , lowercase=16000 , lowercase=True , lowercase=True , ) -> Optional[Any]:
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = min_seq_length
lowerCamelCase_ = max_seq_length
lowerCamelCase_ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCamelCase_ = feature_size
lowerCamelCase_ = padding_value
lowerCamelCase_ = sampling_rate
lowerCamelCase_ = return_attention_mask
lowerCamelCase_ = do_normalize
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def SCREAMING_SNAKE_CASE_( self , lowercase=False , lowercase=False ) -> Optional[Any]:
def _flatten(lowercase ):
return list(itertools.chain(*lowercase ) )
if equal_length:
lowerCamelCase_ = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
lowerCamelCase_ = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCamelCase_ = [np.asarray(lowercase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _SCREAMING_SNAKE_CASE ( snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = ASTFeatureExtractor
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = ASTFeatureExtractionTester(self )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
# Tests that all call wrap to encode_plus and batch_encode_plus
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase_ = [np.asarray(lowercase ) for speech_input in speech_inputs]
# Test not batched input
lowerCamelCase_ = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
lowerCamelCase_ = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(lowercase , lowercase , atol=1e-3 ) )
# Test batched
lowerCamelCase_ = feat_extract(lowercase , padding=lowercase , return_tensors="np" ).input_values
lowerCamelCase_ = feat_extract(lowercase , padding=lowercase , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowercase , lowercase ):
self.assertTrue(np.allclose(lowercase , lowercase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
lowerCamelCase_ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowerCamelCase_ = np.asarray(lowercase )
lowerCamelCase_ = feat_extract(lowercase , return_tensors="np" ).input_values
lowerCamelCase_ = feat_extract(lowercase , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowercase , lowercase ):
self.assertTrue(np.allclose(lowercase , lowercase , atol=1e-3 ) )
@require_torch
def SCREAMING_SNAKE_CASE_( self ) -> int:
import torch
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase_ = np.random.rand(100 ).astype(np.floataa )
lowerCamelCase_ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCamelCase_ = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
lowerCamelCase_ = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> int:
from datasets import load_dataset
lowerCamelCase_ = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
lowerCamelCase_ = ds.sort("id" ).select(range(lowercase ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
@require_torch
def SCREAMING_SNAKE_CASE_( self ) -> int:
# fmt: off
lowerCamelCase_ = torch.tensor(
[-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6,
-1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3,
-1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6,
-0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] )
# fmt: on
lowerCamelCase_ = self._load_datasamples(1 )
lowerCamelCase_ = ASTFeatureExtractor()
lowerCamelCase_ = feature_extractor(lowercase , return_tensors="pt" ).input_values
        self.assertEqual(input_values.shape , (1, 1024, 128) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowercase , atol=1e-4 ) )
| 19 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowercase , ) -> Optional[int]:
lowerCamelCase_ = parent
lowerCamelCase_ = 13
lowerCamelCase_ = 7
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = 99
lowerCamelCase_ = 32
lowerCamelCase_ = 2
lowerCamelCase_ = 4
lowerCamelCase_ = 37
lowerCamelCase_ = "gelu"
lowerCamelCase_ = 0.1
lowerCamelCase_ = 0.1
lowerCamelCase_ = 512
lowerCamelCase_ = 16
lowerCamelCase_ = 2
lowerCamelCase_ = 0.0_2
lowerCamelCase_ = 3
lowerCamelCase_ = 4
lowerCamelCase_ = None
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
lowerCamelCase_ = True
lowerCamelCase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Any:
lowerCamelCase_ = TFEsmModel(config=lowercase )
lowerCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
lowerCamelCase_ = model(lowercase )
lowerCamelCase_ = [input_ids, input_mask]
lowerCamelCase_ = model(lowercase )
lowerCamelCase_ = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Tuple:
lowerCamelCase_ = True
lowerCamelCase_ = TFEsmModel(config=lowercase )
lowerCamelCase_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"encoder_hidden_states": encoder_hidden_states,
"encoder_attention_mask": encoder_attention_mask,
}
lowerCamelCase_ = model(lowercase )
lowerCamelCase_ = [input_ids, input_mask]
lowerCamelCase_ = model(lowercase , encoder_hidden_states=lowercase )
# Also check the case where encoder outputs are not passed
lowerCamelCase_ = model(lowercase , attention_mask=lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
lowerCamelCase_ = TFEsmForMaskedLM(config=lowercase )
lowerCamelCase_ = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = TFEsmForTokenClassification(config=lowercase )
lowerCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
lowerCamelCase_ = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
lowerCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ = (
{
'feature-extraction': TFEsmModel,
'fill-mask': TFEsmForMaskedLM,
'text-classification': TFEsmForSequenceClassification,
'token-classification': TFEsmForTokenClassification,
'zero-shot': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = TFEsmModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=lowercase , hidden_size=37 )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = TFEsmModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@unittest.skip("Protein models do not support embedding resizing." )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
pass
@unittest.skip("Protein models do not support embedding resizing." )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
pass
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(lowercase )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
lowerCamelCase_ = model.get_bias()
assert isinstance(lowercase , lowercase )
for k, v in name.items():
assert isinstance(lowercase , tf.Variable )
else:
lowerCamelCase_ = model.get_output_embeddings()
assert x is None
lowerCamelCase_ = model.get_bias()
assert name is None
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )
lowerCamelCase_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase_ = model(lowercase )[0]
lowerCamelCase_ = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , lowercase )
# compare the actual values for a slice.
lowerCamelCase_ = tf.constant(
[
[
[8.9_2_1_5_1_8, -1_0.5_8_9_8_1_4, -6.4_6_7_1_3_0_7],
[-6.3_9_6_7_1_5_6, -1_3.9_1_1_3_7_7, -1.1_2_1_1_9_1_5],
[-7.7_8_1_2_4_7, -1_3.9_5_1_5_5_7, -3.7_4_0_5_9_2],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )
lowerCamelCase_ = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase_ = model(lowercase )[0]
# compare the actual values for a slice.
lowerCamelCase_ = tf.constant(
[
[
[0.1_4_4_4_3_0_9_2, 0.5_4_1_2_5_3_2_7, 0.3_2_4_7_7_3_9],
[0.3_0_3_4_0_4_8_4, 0.0_0_5_2_6_6_7_6, 0.3_1_0_7_7_7_2_2],
[0.3_2_2_7_8_0_4_3, -0.2_4_9_8_7_0_9_6, 0.3_4_1_4_6_2_8],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 19 | 1 |
import numpy
# List of input, output pairs
__A =(
((5, 2, 3), 1_5),
((6, 5, 9), 2_5),
((1_1, 1_2, 1_3), 4_1),
((1, 1, 1), 8),
((1_1, 1_2, 1_3), 4_1),
)
__A =(((5_1_5, 2_2, 1_3), 5_5_5), ((6_1, 3_5, 4_9), 1_5_0))
__A =[2, 4, 1, 5]
__A =len(train_data)
__A =0.009
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__="train" ):
return calculate_hypothesis_value(lowerCamelCase__ , lowerCamelCase__ ) - output(
lowerCamelCase__ , lowerCamelCase__ )
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = 0
    for i in range(len(parameter_vector) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
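# Worked example (added): with the initial parameter_vector [2, 4, 1, 5], the first
# training input (5, 2, 3) gives h = 2 + 4*5 + 1*2 + 5*3 = 39, so the initial error
# against its expected output 15 is 39 - 15 = 24.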
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__=m ):
lowerCamelCase_ = 0
for i in range(lowerCamelCase__ ):
if index == -1:
summation_value += _error(lowerCamelCase__ )
else:
summation_value += _error(lowerCamelCase__ ) * train_data[i][0][index]
return summation_value
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = summation_of_cost_derivative(lowerCamelCase__ , lowerCamelCase__ ) / m
return cost_derivative_value
def lowerCamelCase_ ( ):
global parameter_vector
# Tune these values to set a tolerance value for predicted output
lowerCamelCase_ = 0.00_00_02
lowerCamelCase_ = 0
lowerCamelCase_ = 0
while True:
j += 1
lowerCamelCase_ = [0, 0, 0, 0]
for i in range(0 , len(lowerCamelCase__ ) ):
lowerCamelCase_ = get_cost_derivative(i - 1 )
lowerCamelCase_ = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
lowerCamelCase__ , lowerCamelCase__ , atol=lowerCamelCase__ , rtol=lowerCamelCase__ , ):
break
lowerCamelCase_ = temp_parameter_vector
print(("Number of iterations:", j) )
def lowerCamelCase_ ( ):
for i in range(len(lowerCamelCase__ ) ):
print(("Actual output value:", output(lowerCamelCase__ , "test" )) )
print(("Hypothesis output:", calculate_hypothesis_value(lowerCamelCase__ , "test" )) )
if __name__ == "__main__":
    run_gradient_descent()
    print('''\nTesting gradient descent for a linear hypothesis function.\n''')
    test_gradient_descent()
| 19 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
__A =logging.get_logger(__name__)
def lowerCamelCase_ ( ):
# Get the sagemaker specific mp parameters from smp_options variable.
lowerCamelCase_ = os.getenv("SM_HP_MP_PARAMETERS" , "{}" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
lowerCamelCase_ = json.loads(lowerCamelCase__ )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
lowerCamelCase_ = os.getenv("SM_FRAMEWORK_PARAMS" , "{}" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
lowerCamelCase_ = json.loads(lowerCamelCase__ )
        if not mpi_options.get("sagemaker_mpi_enabled" , False ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("smdistributed" ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = field(
default='' , metadata={'help': 'Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'} , )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
super().__post_init__()
warnings.warn(
"`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
"`TrainingArguments` instead." , lowercase , )
@cached_property
def SCREAMING_SNAKE_CASE_( self ) -> "torch.device":
logger.info("PyTorch: setting up devices" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"torch.distributed process group is initialized, but local_rank == -1. "
"In order to use Torch DDP, launch your script with `python -m torch.distributed.launch" )
if self.no_cuda:
lowerCamelCase_ = torch.device("cpu" )
lowerCamelCase_ = 0
elif is_sagemaker_model_parallel_available():
lowerCamelCase_ = smp.local_rank()
lowerCamelCase_ = torch.device("cuda" , lowercase )
lowerCamelCase_ = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="smddp" , timeout=self.ddp_timeout_delta )
lowerCamelCase_ = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK" ) )
lowerCamelCase_ = torch.device("cuda" , self.local_rank )
lowerCamelCase_ = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
lowerCamelCase_ = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
lowerCamelCase_ = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="nccl" , timeout=self.ddp_timeout_delta )
lowerCamelCase_ = torch.device("cuda" , self.local_rank )
lowerCamelCase_ = 1
if device.type == "cuda":
torch.cuda.set_device(lowercase )
return device
@property
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
return not is_sagemaker_model_parallel_available()
@property
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
return False
| 19 | 1 |
import math
def prime_sieve(n ):
    # Sieve of Eratosthenes over the odd numbers; returns all primes below n.
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 , int(n**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , n , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
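# Quick check (added): prime_sieve(10) -> [2, 3, 5, 7], i.e. all primes below n.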
def solution(limit=9_9_9_9_6_6_6_6_3_3_3_3 ):
    primes_upper_bound = math.floor(math.sqrt(limit ) ) + 1_0_0
    primes = prime_sieve(primes_upper_bound )
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = primes[prime_index]
while (last_prime**2) <= limit:
lowerCamelCase_ = primes[prime_index + 1]
lowerCamelCase_ = last_prime**2
lowerCamelCase_ = next_prime**2
# Get numbers divisible by lps(current)
lowerCamelCase_ = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
lowerCamelCase_ = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
lowerCamelCase_ = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
lowerCamelCase_ = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
    print(solution())
| 19 |
import math
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ = 0 , lowerCamelCase__ = 0 ):
lowerCamelCase_ = end or len(lowerCamelCase__ )
for i in range(lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = i
lowerCamelCase_ = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
lowerCamelCase_ = array[temp_index - 1]
temp_index -= 1
lowerCamelCase_ = temp_index_value
return array
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): # Max Heap
lowerCamelCase_ = index
lowerCamelCase_ = 2 * index + 1 # Left Node
lowerCamelCase_ = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
lowerCamelCase_ = left_index
if right_index < heap_size and array[largest] < array[right_index]:
lowerCamelCase_ = right_index
if largest != index:
lowerCamelCase_ , lowerCamelCase_ = array[largest], array[index]
heapify(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = len(lowerCamelCase__ )
for i in range(n // 2 , -1 , -1 ):
heapify(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
for i in range(n - 1 , 0 , -1 ):
lowerCamelCase_ , lowerCamelCase_ = array[0], array[i]
heapify(lowerCamelCase__ , 0 , lowerCamelCase__ )
return array
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = low
lowerCamelCase_ = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
lowerCamelCase_ , lowerCamelCase_ = array[j], array[i]
i += 1
def sort(array ):
    if len(array ) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array ) ) )
    size_threshold = 1_6
    return intro_sort(array , 0 , len(array ) , size_threshold , max_depth )
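# Example (added): sort([4, 2, 6, 8, 1]) -> [1, 2, 4, 6, 8]; an empty list is
# returned unchanged by the early exit above.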
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(lowerCamelCase__ )
max_depth -= 1
lowerCamelCase_ = median_of_a(lowerCamelCase__ , lowerCamelCase__ , start + ((end - start) // 2) + 1 , end - 1 )
lowerCamelCase_ = partition(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
intro_sort(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = p
return insertion_sort(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input('''Enter numbers separated by a comma : ''').strip()
    unsorted = [float(item) for item in user_input.split(''',''')]
    print(sort(unsorted))
| 19 | 1 |
from __future__ import annotations
__A =list[list[int]]
# assigning initial values to the grid
__A =[
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
__A =[
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def lowerCamelCase_ ( lowerCamelCase__ ):
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            # Backtrack: undo the tentative assignment.
            grid[row][column] = 0
    return None
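# Note: ``sudoku`` fills ``grid`` in place and returns that same object on
# success, so pass a copy if the original grid must be preserved.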
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
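# A quick sanity check of the helpers above (an illustrative sketch, not part
# of the original file):
#     >>> find_empty_location(initial_grid)
#     (0, 1)
#     >>> is_safe(initial_grid, 0, 1, 5)   # 5 already appears in row 0
#     False
#     >>> is_safe(initial_grid, 0, 1, 1)   # 1 conflicts with nothing nearby
#     True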
if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        self._gen_kwargs = gen_kwargs.copy()
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
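# A minimal construction/usage sketch. Every name below (``model``,
# ``training_args``, ``eval_dataset``, ``eval_examples``,
# ``post_processing_function``, ``compute_metrics``) is a placeholder for an
# object built elsewhere, not something defined in this file:
#
#     trainer = QuestionAnsweringSeq2SeqTrainer(
#         model=model,
#         args=training_args,
#         eval_dataset=eval_dataset,
#         eval_examples=eval_examples,
#         post_process_function=post_processing_function,
#         compute_metrics=compute_metrics,
#     )
#     metrics = trainer.evaluate(max_length=64, num_beams=4)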