| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82 to 54.1k chars) | int64 (0 to 699) | string (111 to 35.6k chars) | int64 (0 to 699) | int64 (0 to 1) |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class BarthezTokenizer(PreTrainedTokenizer):
    """BARThez tokenizer, based on SentencePiece with fairseq-style special-token ids."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Fairseq pins the low ids to its own special tokens and appends <mask> last.
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs: `<s> A </s>` for one sequence, `<s> A </s></s> B </s>` for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """BARThez does not use token type ids, so the mask is all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token (str) to an id, falling back to the unk id."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an id to a token (str)."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Convert a sequence of tokens back into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # Make sure that special tokens are not decoded using the sentencepiece model.
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None  # the SentencePiece processor is not picklable
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility with pickles that predate `sp_model_kwargs`
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
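

# A minimal usage sketch, assuming the `moussaKam/barthez` checkpoint listed
# above is reachable; it goes through the public `transformers` entry point
# (which re-exports this class), since this module uses relative imports.
def _barthez_roundtrip_demo() -> str:
    from transformers import BarthezTokenizer as _BarthezTokenizer

    tokenizer = _BarthezTokenizer.from_pretrained("moussaKam/barthez")
    ids = tokenizer("Paris est la capitale de la France.")["input_ids"]
    # Decoding with special tokens stripped should round-trip the input text.
    return tokenizer.decode(ids, skip_special_tokens=True)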
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"


class FNetTokenizerFast(PreTrainedTokenizerFast):
    """Fast FNet tokenizer, backed by the HuggingFace `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs: `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create token type ids: 0 for the first sequence, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
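

# A minimal sketch of the pair layout produced by the two helpers above,
# assuming the `google/fnet-base` checkpoint is reachable from the Hub:
def _fnet_pair_layout_demo() -> list:
    from transformers import FNetTokenizerFast as _FNetTokenizerFast

    tokenizer = _FNetTokenizerFast.from_pretrained("google/fnet-base")
    encoded = tokenizer("first sentence", "second sentence")
    # Layout is `[CLS] A [SEP] B [SEP]`: segment id 0 through the first [SEP],
    # segment id 1 for the second sentence and its trailing [SEP].
    assert encoded["token_type_ids"][0] == 0 and encoded["token_type_ids"][-1] == 1
    return encoded["token_type_ids"]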
def solution(n: int = 4_000_000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed `n`."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
        "distilbert-base-uncased-distilled-squad": (
            "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
        ),
        "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
        "distilbert-base-cased-distilled-squad": (
            "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
        ),
        "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
        "distilbert-base-multilingual-cased": (
            "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
        "distilbert-base-uncased-distilled-squad": (
            "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
        ),
        "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
        "distilbert-base-cased-distilled-squad": (
            "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
        ),
        "distilbert-base-german-cased": (
            "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
        ),
        "distilbert-base-multilingual-cased": (
            "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "distilbert-base-uncased": 512,
    "distilbert-base-uncased-distilled-squad": 512,
    "distilbert-base-cased": 512,
    "distilbert-base-cased-distilled-squad": 512,
    "distilbert-base-german-cased": 512,
    "distilbert-base-multilingual-cased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "distilbert-base-uncased": {"do_lower_case": True},
    "distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
    "distilbert-base-cased": {"do_lower_case": False},
    "distilbert-base-cased-distilled-squad": {"do_lower_case": False},
    "distilbert-base-german-cased": {"do_lower_case": False},
    "distilbert-base-multilingual-cased": {"do_lower_case": False},
}


class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    """Fast DistilBERT tokenizer, backed by the HuggingFace `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if the requested options differ from
        # the ones serialized in tokenizer.json.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs: `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create token type ids: 0 for the first sequence, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
import unittest

import numpy as np

from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.albert.modeling_flax_albert import (
        FlaxAlbertForMaskedLM,
        FlaxAlbertForMultipleChoice,
        FlaxAlbertForPreTraining,
        FlaxAlbertForQuestionAnswering,
        FlaxAlbertForSequenceClassification,
        FlaxAlbertForTokenClassification,
        FlaxAlbertModel,
    )


class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
from __future__ import annotations

import time
from math import sqrt


# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost


class AStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    # argument order matches Node(goal_x, goal_y)
                    self.target.pos_x,
                    self.target.pos_y,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # each direction aims at the frontier of the other one
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()  # drop the meeting node, already in fwd_path
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y, x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
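
    # A minimal verification sketch, assuming the 7x7 demo grid above: both
    # searches should return a 4-connected path from `init` to `goal`
    # (fresh instances, since `search` consumes the open list).
    for found_path in (AStar(init, goal).search(), BidirectionalAStar(init, goal).search()):
        assert found_path[0] == init and found_path[-1] == goal
        for (y1, x1), (y2, x2) in zip(found_path, found_path[1:]):
            assert abs(y1 - y2) + abs(x1 - x2) == 1  # one grid move per step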
import unittest

from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        AlbertForMaskedLM,
        AlbertForMultipleChoice,
        AlbertForPreTraining,
        AlbertForQuestionAnswering,
        AlbertForSequenceClassification,
        AlbertForTokenClassification,
        AlbertModel,
    )
    from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST


class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # The pretraining model needs both MLM labels and a sentence-order label
    # when `return_labels` is requested.
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
import argparse

import torch

from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
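
# Example invocation (a sketch: the script name and paths are hypothetical
# placeholders; the flags mirror the argparse definitions above):
#
#   python convert_gpt2_checkpoint.py \
#       --gpt2_checkpoint_path /path/to/tf_checkpoint \
#       --pytorch_dump_folder_path /path/to/output \
#       --gpt2_config_file /path/to/config.json
#
# Only --gpt2_config_file is optional: an empty value falls back to the
# default GPT2Config().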
from math import pi, sqrt, tan


def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    """Total surface area: curved half-sphere plus the flat circular base."""
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side_1: float, side_2: float, side_3: float) -> float:
    """Heron's formula, given the three side lengths."""
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError("area_reg_polygon() only accepts integers greater than or equal to three as number of sides")
    elif length < 0:
        raise ValueError("area_reg_polygon() only accepts non-negative values as length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)  # verbose so we can see methods missing tests

    print("[DEMO] Areas of various geometric shapes: \n")
    print(f"Rectangle: {area_rectangle(10, 20) = }")
    print(f"Square: {area_square(10) = }")
    print(f"Triangle: {area_triangle(10, 10) = }")
    print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
    print(f"Parallelogram: {area_parallelogram(10, 20) = }")
    print(f"Rhombus: {area_rhombus(10, 20) = }")
    print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
    print(f"Circle: {area_circle(20) = }")
    print(f"Ellipse: {area_ellipse(10, 20) = }")
    print("\nSurface Areas of various geometric shapes: \n")
    print(f"Cube: {surface_area_cube(20) = }")
    print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
    print(f"Sphere: {surface_area_sphere(20) = }")
    print(f"Hemisphere: {surface_area_hemisphere(20) = }")
    print(f"Cone: {surface_area_cone(10, 20) = }")
    print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
    print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
    print(f"Torus: {surface_area_torus(20, 10) = }")
    print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
    print(f"Square: {area_reg_polygon(4, 10) = }")
    print(f"Regular Pentagon: {area_reg_polygon(5, 10) = }")
import argparse
import json
import os

import torch
from transformers.file_utils import has_file

from diffusers import UNet2DConditionModel, UNet2DModel


do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    # old config key -> new config key
    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    # old top-level module name -> new top-level module name
    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

    config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
    config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
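
# Example invocation (a sketch: the script name and repo path are hypothetical
# placeholders; the flags mirror the argparse definitions above):
#
#   python change_unet_naming.py --repo_path /path/to/unet/repo --dump_path out
#
# Note that --dump_path is required and parsed but, as written, the script
# saves the converted config/weights back into --repo_path's subfolder.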
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __lowerCAmelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] ) -> Any:
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def __lowerCAmelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : List[str] ) -> str:
__lowerCAmelCase =tmp_path / '''cache'''
__lowerCAmelCase ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__lowerCAmelCase =JsonDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ ).read()
_check_json_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def __lowerCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] ) -> Optional[Any]:
__lowerCAmelCase =tmp_path / '''cache'''
__lowerCAmelCase ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__lowerCAmelCase =features.copy() if features else default_expected_features
__lowerCAmelCase =(
Features({feature: Value(lowerCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowerCAmelCase =JsonDatasetReader(lowerCAmelCase__ , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_json_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""},
] , )
def __lowerCAmelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : int ) -> List[str]:
__lowerCAmelCase =tmp_path / '''cache'''
__lowerCAmelCase ={'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
__lowerCAmelCase =features.copy() if features else default_expected_features
__lowerCAmelCase =(
Features({feature: Value(lowerCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowerCAmelCase =JsonDatasetReader(lowerCAmelCase__ , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def __lowerCAmelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] ) -> List[Any]:
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
__lowerCAmelCase ={'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
__lowerCAmelCase =features.copy()
__lowerCAmelCase =(
Features({feature: Value(lowerCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowerCAmelCase =tmp_path / '''cache'''
__lowerCAmelCase =JsonDatasetReader(lowerCAmelCase__ , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def __lowerCAmelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] ) -> Any:
__lowerCAmelCase =tmp_path / '''cache'''
__lowerCAmelCase ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__lowerCAmelCase =JsonDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , split=lowerCAmelCase__ ).read()
_check_json_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def __lowerCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict ) -> Tuple:
if issubclass(lowerCAmelCase__ , lowerCAmelCase__ ):
__lowerCAmelCase =jsonl_path
elif issubclass(lowerCAmelCase__ , lowerCAmelCase__ ):
__lowerCAmelCase =[jsonl_path]
__lowerCAmelCase =tmp_path / '''cache'''
__lowerCAmelCase ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__lowerCAmelCase =JsonDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_json_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
def __lowerCAmelCase ( __lowerCamelCase : Any , __lowerCamelCase : List[str] , __lowerCamelCase : int=("train",) ) -> Dict:
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
for split in splits:
__lowerCAmelCase =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def __lowerCAmelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : List[Any] ) -> List[Any]:
__lowerCAmelCase =tmp_path / '''cache'''
__lowerCAmelCase ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__lowerCAmelCase =JsonDatasetReader({"""train""": jsonl_path} , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ ).read()
_check_json_datasetdict(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def __lowerCAmelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] ) -> Optional[int]:
__lowerCAmelCase =tmp_path / '''cache'''
__lowerCAmelCase ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__lowerCAmelCase =features.copy() if features else default_expected_features
__lowerCAmelCase =(
Features({feature: Value(lowerCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowerCAmelCase =JsonDatasetReader({"""train""": jsonl_path} , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_json_datasetdict(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def __lowerCAmelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] ) -> Optional[int]:
if split:
__lowerCAmelCase ={split: jsonl_path}
else:
__lowerCAmelCase ='''train'''
__lowerCAmelCase ={'''train''': jsonl_path, '''test''': jsonl_path}
__lowerCAmelCase =tmp_path / '''cache'''
__lowerCAmelCase ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__lowerCAmelCase =JsonDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_json_datasetdict(lowerCAmelCase__ , lowerCAmelCase__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __lowerCAmelCase ( __lowerCamelCase : Optional[Any] ) -> List[Any]:
return json.load(lowerCAmelCase__ )
def __lowerCAmelCase ( __lowerCamelCase : Tuple ) -> Optional[Any]:
return [json.loads(lowerCAmelCase__ ) for line in buffer]
class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = str(tmp_path_factory.mktemp("data") / f"test.json.{extension}")
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
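# A minimal round trip of the reader/writer pair exercised above (an
# illustrative sketch, not part of the test suite; "data.jsonl" is a
# placeholder path):
#
#   from datasets import Dataset
#   from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
#
#   ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
#   JsonDatasetWriter(ds, "data.jsonl", lines=True).write()       # one JSON object per line
#   reloaded = JsonDatasetReader({"train": "data.jsonl"}).read()  # returns a DatasetDict
#   assert reloaded["train"].num_rows == 2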
| 354 |
'''simple docstring'''
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPMaDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPMaDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
| 688 | 0 |
"""simple docstring"""
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
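# Quick sanity check for the digit-sum loop above: 2**15 == 32768 and
# 3 + 2 + 7 + 6 + 8 == 26, so `solution(15) == 26`. Each iteration peels one
# decimal digit off n (n % 10) and drops it (n // 10), so the loop runs in
# O(number of digits of 2**power) time.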
| 572 |
'''simple docstring'''
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])
    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])
    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
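# Quick illustration of the alignment rules exercised above, with
# stage_names = ["a", "b", "c"] (values taken from the assertions):
#
#   get_aligned_output_features_output_indices(None, None, ["a", "b", "c"])
#       -> (["c"], [2])            # both unset: default to the last stage
#   get_aligned_output_features_output_indices(None, [-3, -1], ["a", "b", "c"])
#       -> (["a", "c"], [-3, -1])  # negative indices resolve against the stage list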
| 688 | 0 |
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
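# Sanity check (illustrative): Gamma(n) == (n - 1)! for positive integers, so
# the quadrature above should satisfy, e.g.:
#
#   assert math.isclose(gamma(5), 24.0, rel_tol=1e-6)
#
# scipy's quad handles the infinite upper limit (numpy's inf) directly.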
| 368 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
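# Example invocation (illustrative; the script name and paths are placeholders,
# not shipped files):
#
#   python convert_flava_original_pytorch_to_hf.py \
#       --checkpoint_path flava_full.pt \
#       --codebook_path flava_codebook.pt \
#       --pytorch_dump_folder_path ./flava-hf \
#       --config_path config.json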
| 688 | 0 |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None
    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)
    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname
    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
| 25 |
'''simple docstring'''
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}
def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
main()
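# The expected_local_size formula in main() hands the remainder to the lowest
# ranks. A standalone check of that arithmetic (illustrative, pure Python):
#
#   def expected_sizes(full_size, world_size):
#       return [full_size // world_size + int(rank < full_size % world_size) for rank in range(world_size)]
#
#   assert expected_sizes(NUM_SHARDS * NUM_ITEMS_PER_SHARD, 5) == [3, 3, 2, 2, 2]  # 12 items over 5 ranks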
| 688 | 0 |
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class TestKnapsack(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
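# For reference, the 0/1-knapsack recurrence that the imported k.knapsack is
# expected to follow (an illustrative sketch, not the implementation under test):
def _knapsack_reference(capacity, weights, values, counter):
    """Max value achievable with the first `counter` items within `capacity`."""
    if counter == 0 or capacity == 0:
        return 0
    # item doesn't fit: skip it
    if weights[counter - 1] > capacity:
        return _knapsack_reference(capacity, weights, values, counter - 1)
    # otherwise, take the better of including or excluding the item
    return max(
        values[counter - 1] + _knapsack_reference(capacity - weights[counter - 1], weights, values, counter - 1),
        _knapsack_reference(capacity, weights, values, counter - 1),
    )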
if __name__ == "__main__":
unittest.main()
| 76 |
'''simple docstring'''
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
| 688 | 0 |
'''simple docstring'''
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    pre_numerator = 1
    cur_numerator = 2

    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp

    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(F'''{solution() = }''')
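# From the problem statement, the 10th convergent of e is 1457/536 and
# 1 + 4 + 5 + 7 == 17, so `solution(10) == 17` pins down the recurrence above.
# The continued-fraction coefficients of e are [2; 1, 2, 1, 1, 4, 1, 1, 6, ...]:
# every third coefficient is 2k, hence the `2 * i // 3 if i % 3 == 0 else 1` term.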
| 69 |
'''simple docstring'''
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines: int, direction: str):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
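# Example usage (illustrative; 32 is the ANSI colour code for green):
#
#   forceWrite("working...")
#   clear_line()
#   writeColor("done", 32, end="\n")
#   linebreak()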
| 688 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A_ = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 604 |
'''simple docstring'''
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)

        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
| 688 | 0 |
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # the following are intentionally no-ops for this tokenizer
    def test_training_new_tokenizer(self):
        pass

    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
| 652 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
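# E.g. a 129x514 PIL image becomes 128x512 (rounded down to multiples of 32)
# and a float tensor of shape (1, 3, 512, 128) rescaled from [0, 255] to
# [-1, 1], the range the VQ-VAE encoder expects (a descriptive note only).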
class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vqvae: VQModel,
        unet: UNetaDModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
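# Minimal usage sketch (assumptions: the public
# "CompVis/ldm-super-resolution-4x-openimages" checkpoint id and a PIL input;
# any checkpoint with this vqvae/unet/scheduler layout should work):
#
#   pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   upscaled = pipe(image=low_res_img, num_inference_steps=100, eta=1.0).images[0]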
| 688 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-classification/requirements.txt''')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class snake_case_ :
'''simple docstring'''
__UpperCamelCase = field(
default=lowerCAmelCase_ , metadata={
'''help''': '''Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'''
} , )
__UpperCamelCase = field(
default=lowerCAmelCase_ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
__UpperCamelCase = field(default=lowerCAmelCase_ , metadata={'''help''': '''A folder containing the training data.'''} )
__UpperCamelCase = field(default=lowerCAmelCase_ , metadata={'''help''': '''A folder containing the validation data.'''} )
__UpperCamelCase = field(
default=0.1_5 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
__UpperCamelCase = field(
default=lowerCAmelCase_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
__UpperCamelCase = field(
default=lowerCAmelCase_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def UpperCAmelCase ( self : Union[str, Any] ) -> str:
'''simple docstring'''
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'You must specify either a dataset name from the hub or a train and/or validation directory.' )
@dataclass
class snake_case_ :
'''simple docstring'''
__UpperCamelCase = field(
default='''google/vit-base-patch16-224-in21k''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , )
__UpperCamelCase = field(
default=lowerCAmelCase_ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(lowerCAmelCase_ )} , )
__UpperCamelCase = field(
default=lowerCAmelCase_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__UpperCamelCase = field(
default=lowerCAmelCase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} )
__UpperCamelCase = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
__UpperCamelCase = field(default=lowerCAmelCase_ , metadata={'''help''': '''Name or path of preprocessor config.'''} )
__UpperCamelCase = field(
default=lowerCAmelCase_ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
__UpperCamelCase = field(
default=lowerCAmelCase_ , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            task="image-classification",
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            task="image-classification",
        )
# If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path, num_labels=len(labels), label2id=label2id, id2label=id2label, finetuning_task="image-classification", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )

    def train_transforms(example_batch):
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch

    def val_transforms(example_batch):
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('--do_train requires a train dataset' )
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('--do_eval requires a validation dataset' )
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
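# Example invocation (illustrative; "beans" is a public image-classification
# dataset on the Hub, the output directory is a placeholder):
#
#   python run_image_classification.py \
#       --dataset_name beans \
#       --output_dir ./beans-vit \
#       --remove_unused_columns False \
#       --do_train \
#       --do_eval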
| 375 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
__SCREAMING_SNAKE_CASE = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
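# E.g. downscale_height_and_width(512, 512) == (64, 64): the movq latent grid
# is height/8 x width/8, rounded up when the input is not a multiple, so
# (300, 300) maps to (40, 40).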
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNetaDConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
def __lowerCAmelCase ( self : Union[str, Any] , A__ : int=0 ) -> str:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
a__ : Union[str, Any] = torch.device(F'cuda:{gpu_id}' )
a__ : Union[str, Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(A__ , A__ )
def __lowerCAmelCase ( self : Union[str, Any] , A__ : Tuple=0 ) -> Dict:
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
a__ : int = torch.device(F'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=A__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
a__ : Dict = None
for cpu_offloaded_model in [self.unet, self.movq]:
a__ , a__ : List[str] = cpu_offload_with_hook(A__ , A__ , prev_module_hook=A__ )
# We'll offload the last model manually.
a__ : Dict = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __lowerCAmelCase ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(A__ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(A__ )
def __call__( self : Any , A__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , A__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , A__ : torch.FloatTensor , A__ : int = 5_1_2 , A__ : int = 5_1_2 , A__ : int = 1_0_0 , A__ : float = 4.0 , A__ : int = 1 , A__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , A__ : Optional[torch.FloatTensor] = None , A__ : Optional[str] = "pil" , A__ : bool = True , ) -> str:
'''simple docstring'''
a__ : Optional[Any] = self._execution_device
a__ : List[str] = guidance_scale > 1.0
if isinstance(A__ , A__ ):
a__ : int = torch.cat(A__ , dim=0 )
if isinstance(A__ , A__ ):
a__ : Optional[int] = torch.cat(A__ , dim=0 )
if isinstance(A__ , A__ ):
a__ : int = torch.cat(A__ , dim=0 )
a__ : Union[str, Any] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
a__ : Tuple = image_embeds.repeat_interleave(A__ , dim=0 )
a__ : Optional[int] = negative_image_embeds.repeat_interleave(A__ , dim=0 )
a__ : Optional[int] = hint.repeat_interleave(A__ , dim=0 )
a__ : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=A__ )
a__ : Tuple = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=A__ )
self.scheduler.set_timesteps(A__ , device=A__ )
a__ : int = self.scheduler.timesteps
a__ : str = self.movq.config.latent_channels
a__ , a__ : Optional[int] = downscale_height_and_width(A__ , A__ , self.movq_scale_factor )
# create initial latent
a__ : List[Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , A__ , A__ , A__ , self.scheduler , )
for i, t in enumerate(self.progress_bar(A__ ) ):
# expand the latents if we are doing classifier free guidance
a__ : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
a__ : List[str] = {'''image_embeds''': image_embeds, '''hint''': hint}
a__ : Union[str, Any] = self.unet(
sample=A__ , timestep=A__ , encoder_hidden_states=A__ , added_cond_kwargs=A__ , return_dict=A__ , )[0]
if do_classifier_free_guidance:
a__ , a__ : Dict = noise_pred.split(latents.shape[1] , dim=1 )
a__ , a__ : Dict = noise_pred.chunk(2 )
a__ , a__ : Optional[Any] = variance_pred.chunk(2 )
a__ : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
a__ : Union[str, Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
a__ , a__ : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
a__ : Union[str, Any] = self.scheduler.step(
A__ , A__ , A__ , generator=A__ , )[0]
# post-processing
a__ : Tuple = self.movq.decode(A__ , force_not_quantize=A__ )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
a__ : Union[str, Any] = image * 0.5 + 0.5
a__ : str = image.clamp(0 , 1 )
a__ : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
a__ : int = self.numpy_to_pil(A__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A__ )
| 688 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'deit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'deit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'deit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'deit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'deit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'deit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'deit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'deit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'deit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'deit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split the single timm qkv matrix into separate query/key/value weights."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    """Move the value stored under key `old` to key `new`."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Fetch an image of two cats on which the conversion will be verified."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak a timm DeiT checkpoint into the Hugging Face structure."""
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
# size of the architecture
if deit_name[9:].startswith("""tiny""" ):
__magic_name__ : Optional[int] = 192
__magic_name__ : Any = 768
__magic_name__ : Tuple = 12
__magic_name__ : Dict = 3
elif deit_name[9:].startswith("""small""" ):
__magic_name__ : List[Any] = 384
__magic_name__ : List[str] = 1536
__magic_name__ : int = 12
__magic_name__ : List[str] = 6
if deit_name[9:].startswith("""base""" ):
pass
elif deit_name[4:].startswith("""large""" ):
__magic_name__ : Union[str, Any] = 1024
__magic_name__ : List[str] = 4096
__magic_name__ : Any = 24
__magic_name__ : List[str] = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
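
# Hedged smoke test for a converted checkpoint; the local folder name below is an
# assumption, and prepare_img comes from the script above.
import torch
from transformers import DeiTForImageClassificationWithTeacher, DeiTImageProcessor

model = DeiTForImageClassificationWithTeacher.from_pretrained("./deit_dump")
processor = DeiTImageProcessor.from_pretrained("./deit_dump")
inputs = processor(images=prepare_img(), return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(-1))])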
import os
from typing import List, Optional, Union

from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}


def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
        return [l.strip() for l in lines]


class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
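
# Hedged usage sketch for the tokenizer above; the checkpoint is one of the vocab
# URLs listed at the top of the file, and the printed ids are illustrative.
from transformers import EsmTokenizer

tokenizer = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
enc = tokenizer("MKTAYIAKQR")  # each amino-acid letter is its own vocabulary token
print(enc["input_ids"])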
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all prime numbers below max_number using a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800_800, degree: int = 800_800) -> int:
    """
    Count the hybrid-integers p^q * q^p (p, q distinct primes) that are
    less than or equal to base^degree, using a two-pointer scan over primes.
    """
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
if __name__ == "__main__":
print(F'''{solution() = }''')
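
# A hedged sanity check for the two-pointer count above: compare against a plain
# brute-force pair enumeration for small, illustrative parameters (my own example,
# not from the source). Both use the same log2-based comparison.
def _brute_force_count(base: int, degree: int) -> int:
    bound = degree * log2(base)
    primes = calculate_prime_numbers(int(bound))
    count = 0
    for i, p in enumerate(primes):
        for q in primes[i + 1 :]:
            if q * log2(p) + p * log2(q) <= bound:
                count += 1
    return count


assert solution(600, 2) == _brute_force_count(600, 2)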
import logging
import random

import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex


logger = logging.getLogger(__name__)


class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
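
# Hedged wiring sketch for the classes above; the actor count and the checkpoint
# name are illustrative assumptions, not taken from this file.
import ray

ray.init()
workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
retriever = RagRayDistributedRetriever.from_pretrained("facebook/rag-token-nq", actor_handles=workers)
retriever.init_retrieval()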
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
__lowerCAmelCase =f"""{src_lang}-{tgt_lang}"""
__lowerCAmelCase =f"""\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"""
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
__lowerCAmelCase =os.path.join(lowerCAmelCase__ , """README.md""" )
print(f"""Generating {path}""" )
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(lowerCAmelCase__ )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """
    Interpolate and evaluate a polynomial at x0 using Neville's method.
    Returns the approximated value and the table of intermediate values.
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
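
# A worked call for neville_interpolate above (my own illustrative values):
# the data lies on y = 2x, so the cubic interpolant is exact at x0 = 5.
value, table = neville_interpolate([1, 2, 3, 4], [2, 4, 6, 8], 5)
print(value)  # 10.0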
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        # Reproduces the score obtained with the original mesh-tensorflow mT5 implementation.
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids
        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}


class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
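
# Hedged usage sketch for the config above; Swin2SRForImageSuperResolution is the
# matching transformers model class, and the random input below is illustrative.
import torch
from transformers import Swin2SRForImageSuperResolution

config = Swin2SRConfig(upscale=2)
model = Swin2SRForImageSuperResolution(config)
with torch.no_grad():
    out = model(pixel_values=torch.randn(1, 3, 64, 64))
print(out.reconstruction.shape)  # roughly (1, 3, 128, 128) for upscale=2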
import warnings

from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor


logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))
            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))
            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()
            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))
            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()
            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)
            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)
            config = AutoImageProcessor.from_pretrained(tmpdirname)
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not trusted, loading this dynamic image processor should fail.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))
                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir)
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                    self.assertIsInstance(new_image_processor, CustomImageProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
import collections
import gzip
import os
import urllib.request

import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated


_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"


def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels


class _DataSet:
    """Container for a deprecated, in-memory MNIST dataset split."""

    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]


@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from source url, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath


@deprecated(None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
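
# Hedged usage sketch for read_data_sets above; the target directory is an
# assumption, and files are fetched from the CVDF mirror configured at the top.
mnist = read_data_sets("/tmp/mnist_data", one_hot=True)
batch_images, batch_labels = mnist.train.next_batch(32)
print(batch_images.shape, batch_labels.shape)  # (32, 784) (32, 10)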
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile

from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version


logger = get_logger(__name__)


class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value
        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
"""simple docstring"""
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
return int((input_a, input_a).count(1 ) != 0 )
def __UpperCAmelCase ( ):
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
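
# Illustrative truth-table printout for or_gate above (my own addition).
from itertools import product

for a, b in product((0, 1), repeat=2):
    print(a, b, "->", or_gate(a, b))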
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
'''simple docstring'''
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a : Optional[Any] = {
'''facebook/mask2former-swin-small-coco-instance''': (
'''https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'''
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
a : Optional[Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE = """mask2former"""
__SCREAMING_SNAKE_CASE = ["""swin"""]
__SCREAMING_SNAKE_CASE = {"""hidden_size""": """hidden_dim"""}
def __init__( self : Tuple , a_ : Optional[Dict] = None , a_ : int = 256 , a_ : int = 256 , a_ : int = 256 , a_ : int = 1_024 , a_ : str = "relu" , a_ : int = 6 , a_ : int = 10 , a_ : int = 8 , a_ : float = 0.0 , a_ : int = 2_048 , a_ : bool = False , a_ : bool = False , a_ : int = 4 , a_ : int = 255 , a_ : int = 100 , a_ : float = 0.1 , a_ : float = 2.0 , a_ : float = 5.0 , a_ : float = 5.0 , a_ : int = 12_544 , a_ : float = 3.0 , a_ : float = 0.75 , a_ : float = 0.02 , a_ : float = 1.0 , a_ : bool = True , a_ : List[int] = [4, 8, 16, 32] , a_ : bool = None , **a_ : Any , ):
"""simple docstring"""
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone." )
__snake_case = CONFIG_MAPPING['''swin'''](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=A__ , out_features=["stage1", "stage2", "stage3", "stage4"] , )
if isinstance(A__ , A__ ):
__snake_case = backbone_config.pop("model_type" )
__snake_case = CONFIG_MAPPING[backbone_model_type]
__snake_case = config_class.from_dict(A__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
f'''Supported model types: {",".join(self.backbones_supported )}''' )
__snake_case = backbone_config
__snake_case = feature_size
__snake_case = mask_feature_size
__snake_case = hidden_dim
__snake_case = encoder_feedforward_dim
__snake_case = activation_function
__snake_case = encoder_layers
__snake_case = decoder_layers
__snake_case = num_attention_heads
__snake_case = dropout
__snake_case = dim_feedforward
__snake_case = pre_norm
__snake_case = enforce_input_projection
__snake_case = common_stride
__snake_case = ignore_value
__snake_case = num_queries
__snake_case = no_object_weight
__snake_case = class_weight
__snake_case = mask_weight
__snake_case = dice_weight
__snake_case = train_num_points
__snake_case = oversample_ratio
__snake_case = importance_sample_ratio
__snake_case = init_std
__snake_case = init_xavier_std
__snake_case = use_auxiliary_loss
__snake_case = feature_strides
__snake_case = output_auxiliary_logits
__snake_case = decoder_layers
super().__init__(**A__ )
@classmethod
def A ( cls : Dict , a_ : PretrainedConfig , **a_ : str ):
"""simple docstring"""
return cls(
backbone_config=A__ , **A__ , )
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = copy.deepcopy(self.__dict__ )
__snake_case = self.backbone_config.to_dict()
__snake_case = self.__class__.model_type
return output
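# A minimal usage sketch, assuming the class above mirrors transformers'
# Mask2FormerConfig (the deobfuscated names below are illustrative):
#
# from transformers import Mask2FormerConfig, SwinConfig
# config = Mask2FormerConfig()  # builds the default Swin backbone config
# config = Mask2FormerConfig.from_backbone_config(SwinConfig())
# assert config.to_dict()["backbone_config"]["model_type"] == "swin"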
| 69 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def __a ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : str ):
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
a__ : Dict = TapasConfig.from_json_file(lowerCAmelCase__ )
# set absolute/relative position embeddings parameter
a__ : List[Any] = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
a__ : Optional[Any] = TapasForQuestionAnswering(config=lowerCAmelCase__ )
elif task == "WTQ":
# run_task_main.py hparams
a__ : List[str] = 4
a__ : Optional[int] = True
# hparam_utils.py hparams
a__ : List[Any] = 0.664694
a__ : List[Any] = 0.207951
a__ : Union[str, Any] = 0.121194
a__ : Optional[Any] = True
a__ : Optional[int] = True
a__ : List[str] = False
a__ : Union[str, Any] = 0.0352513
a__ : Any = TapasForQuestionAnswering(config=lowerCAmelCase__ )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
a__ : Tuple = 4
a__ : Dict = False
# hparam_utils.py hparams
a__ : str = 36.4519
a__ : str = 0.903421
a__ : Optional[Any] = 222.088
a__ : Dict = True
a__ : Dict = True
a__ : Dict = True
a__ : str = 0.763141
a__ : List[Any] = TapasForQuestionAnswering(config=lowerCAmelCase__ )
elif task == "TABFACT":
a__ : List[str] = TapasForSequenceClassification(config=lowerCAmelCase__ )
elif task == "MLM":
a__ : Tuple = TapasForMaskedLM(config=lowerCAmelCase__ )
elif task == "INTERMEDIATE_PRETRAINING":
a__ : List[str] = TapasModel(config=lowerCAmelCase__ )
else:
raise ValueError(F'Task {task} not supported.' )
print(F'Building PyTorch model from configuration: {config}' )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save pytorch-model (weights and configuration)
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(lowerCAmelCase__ )
# Save tokenizer files
print(F'Save tokenizer files to {pytorch_dump_path}' )
a__ : Optional[Any] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + '''vocab.txt''' , model_max_length=512 )
tokenizer.save_pretrained(lowerCAmelCase__ )
print('''Used relative position embeddings:''' , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
        help='Whether to use relative position embeddings. Defaults to False; pass this flag to enable them.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
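# Example invocation (script name and paths are placeholders):
#
# python convert_tapas_checkpoint.py \
#     --task WTQ \
#     --reset_position_index_per_cell \
#     --tf_checkpoint_path /path/to/model.ckpt \
#     --tapas_config_file /path/to/tapas_config.json \
#     --pytorch_dump_path /path/to/output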
| 688 | 0 |
from __future__ import annotations
import math
def __UpperCAmelCase ( UpperCAmelCase )-> Optional[int]:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not prime
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5, int(math.sqrt(lowerCAmelCase__ ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
A_ = [num for num in range(3, 100001, 2) if not is_prime(num)]
def __UpperCAmelCase ( UpperCAmelCase )-> List[Any]:
"""simple docstring"""
if not isinstance(lowerCAmelCase__, lowerCAmelCase__ ):
raise ValueError('''n must be an integer''' )
if n <= 0:
raise ValueError('''n must be >= 0''' )
lowercase = []
for num in range(len(lowerCAmelCase__ ) ):
lowercase = 0
while 2 * i * i <= odd_composites[num]:
lowercase = odd_composites[num] - 2 * i * i
if is_prime(lowerCAmelCase__ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(lowerCAmelCase__ ) == n:
return list_nums
return []
def __UpperCAmelCase ( )-> Any:
"""simple docstring"""
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F"{solution() = }")
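# Worked decompositions for the smallest odd composites (checked by hand):
# 9 = 7 + 2*1**2, 15 = 13 + 2*1**2, 21 = 3 + 2*3**2, 25 = 7 + 2*3**2,
# 27 = 19 + 2*2**2, 33 = 31 + 2*1**2; compute_nums(1) therefore has to scan
# much further to find the first counterexample to Goldbach's other conjecture.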
| 604 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__SCREAMING_SNAKE_CASE = {
'vocab_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
},
'tokenizer_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
},
}
__SCREAMING_SNAKE_CASE = {
'google/fnet-base': 5_1_2,
'google/fnet-large': 5_1_2,
}
__SCREAMING_SNAKE_CASE = '▁'
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ["input_ids", "token_type_ids"]
__UpperCamelCase = FNetTokenizer
def __init__( self : Any , A__ : Any=None , A__ : int=None , A__ : List[str]=False , A__ : int=True , A__ : str=True , A__ : List[Any]="<unk>" , A__ : Dict="[SEP]" , A__ : List[str]="<pad>" , A__ : Union[str, Any]="[CLS]" , A__ : Dict="[MASK]" , **A__ : Tuple , ) -> List[str]:
'''simple docstring'''
a__ : Optional[int] = (
AddedToken(A__ , lstrip=A__ , rstrip=A__ , normalized=A__ )
if isinstance(A__ , A__ )
else mask_token
)
super().__init__(
A__ , tokenizer_file=A__ , do_lower_case=A__ , remove_space=A__ , keep_accents=A__ , unk_token=A__ , sep_token=A__ , pad_token=A__ , cls_token=A__ , mask_token=A__ , **A__ , )
a__ : Optional[Any] = do_lower_case
a__ : Dict = remove_space
a__ : List[Any] = keep_accents
a__ : Optional[Any] = vocab_file
a__ : Any = False if not self.vocab_file else True
def __lowerCAmelCase ( self : str , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
a__ : Optional[int] = [self.sep_token_id]
a__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __lowerCAmelCase ( self : List[Any] , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
a__ : Dict = [self.sep_token_id]
a__ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCAmelCase ( self : Tuple , A__ : str , A__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(A__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
a__ : Union[str, Any] = os.path.join(
A__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A__ ):
copyfile(self.vocab_file , A__ )
return (out_vocab_file,)
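# A sketch of the special-token layout implemented above, assuming the public
# google/fnet-base checkpoint (pair layout is [CLS] A [SEP] B [SEP]):
#
# from transformers import FNetTokenizerFast
# tok = FNetTokenizerFast.from_pretrained("google/fnet-base")
# ids = tok.build_inputs_with_special_tokens([101], [202])
# # -> [tok.cls_token_id, 101, tok.sep_token_id, 202, tok.sep_token_id]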
| 688 | 0 |
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ = 0 ) -> int:
__lowerCamelCase : List[Any] = length or len(lowerCAmelCase__ )
__lowerCamelCase : Optional[Any] = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]  # swap adjacent elements
__lowerCamelCase : int = True
return list_data if not swapped else bubble_sort(lowerCAmelCase__ , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
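# Intended behavior of the recursive bubble sort above:
# bubble_sort([5, 2, 9, 1]) -> [1, 2, 5, 9]
# bubble_sort([1, 2, 3]) -> [1, 2, 3]  (no swap on the first pass, so no recursion)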
| 652 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__SCREAMING_SNAKE_CASE = {
'vocab_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-german-cased': (
'https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'
),
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'
),
},
}
__SCREAMING_SNAKE_CASE = {
'distilbert-base-uncased': 5_1_2,
'distilbert-base-uncased-distilled-squad': 5_1_2,
'distilbert-base-cased': 5_1_2,
'distilbert-base-cased-distilled-squad': 5_1_2,
'distilbert-base-german-cased': 5_1_2,
'distilbert-base-multilingual-cased': 5_1_2,
}
__SCREAMING_SNAKE_CASE = {
'distilbert-base-uncased': {'do_lower_case': True},
'distilbert-base-uncased-distilled-squad': {'do_lower_case': True},
'distilbert-base-cased': {'do_lower_case': False},
'distilbert-base-cased-distilled-squad': {'do_lower_case': False},
'distilbert-base-german-cased': {'do_lower_case': False},
'distilbert-base-multilingual-cased': {'do_lower_case': False},
}
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase = ["input_ids", "attention_mask"]
__UpperCamelCase = DistilBertTokenizer
def __init__( self : str , A__ : Optional[Any]=None , A__ : Any=None , A__ : Tuple=True , A__ : List[Any]="[UNK]" , A__ : List[str]="[SEP]" , A__ : Tuple="[PAD]" , A__ : Optional[int]="[CLS]" , A__ : Union[str, Any]="[MASK]" , A__ : List[str]=True , A__ : Any=None , **A__ : int , ) -> str:
'''simple docstring'''
super().__init__(
A__ , tokenizer_file=A__ , do_lower_case=A__ , unk_token=A__ , sep_token=A__ , pad_token=A__ , cls_token=A__ , mask_token=A__ , tokenize_chinese_chars=A__ , strip_accents=A__ , **A__ , )
a__ : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , A__ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , A__ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , A__ ) != tokenize_chinese_chars
):
a__ : int = getattr(A__ , normalizer_state.pop('''type''' ) )
a__ : List[Any] = do_lower_case
a__ : str = strip_accents
a__ : List[str] = tokenize_chinese_chars
a__ : Dict = normalizer_class(**A__ )
a__ : List[Any] = do_lower_case
def __lowerCAmelCase ( self : Tuple , A__ : List[str] , A__ : Dict=None ) -> List[str]:
'''simple docstring'''
a__ : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCAmelCase ( self : int , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
a__ : List[str] = [self.sep_token_id]
a__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCAmelCase ( self : str , A__ : str , A__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
a__ : int = self._tokenizer.model.save(A__ , name=A__ )
return tuple(A__ )
| 688 | 0 |
from collections.abc import Callable
def SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case ) -> Any:
__lowercase = a
__lowercase = b
if function(lowerCAmelCase__ ) == 0: # one of the a or b is a root for the function
return a
elif function(lowerCAmelCase__ ) == 0:
return b
elif (
function(lowerCAmelCase__ ) * function(lowerCAmelCase__ ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError('could not find root in given interval.' )
else:
__lowercase = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7: # until the bracketing interval is narrower than 10^-7
if function(lowerCAmelCase__ ) == 0:
return mid
elif function(lowerCAmelCase__ ) * function(lowerCAmelCase__ ) < 0:
__lowercase = mid
else:
__lowercase = mid
__lowercase = start + (end - start) / 2.0
return mid
def SCREAMING_SNAKE_CASE ( snake_case ) -> Dict:
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
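# Worked example: for f(x) = x**3 - 2*x - 5, f(2) = -1 and f(3) = 16 bracket a
# sign change, so bisection(f, 1, 1000) converges to the real root x ≈ 2.0945515.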
| 375 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0s are free cells, 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__SCREAMING_SNAKE_CASE = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
__SCREAMING_SNAKE_CASE = tuple[int, int]
class lowerCAmelCase__ :
"""simple docstring"""
def __init__( self : str , A__ : int , A__ : int , A__ : int , A__ : int , A__ : int , A__ : Node | None , ) -> None:
'''simple docstring'''
a__ : Optional[int] = pos_x
a__ : str = pos_y
a__ : Optional[int] = (pos_y, pos_x)
a__ : List[str] = goal_x
a__ : Any = goal_y
a__ : Any = g_cost
a__ : Optional[int] = parent
a__ : Union[str, Any] = self.calculate_heuristic()
a__ : List[Any] = self.g_cost + self.h_cost
def __lowerCAmelCase ( self : Union[str, Any] ) -> float:
'''simple docstring'''
a__ : List[str] = self.pos_x - self.goal_x
a__ : List[str] = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(A__ ) + abs(A__ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self : List[Any] , A__ : Node ) -> bool:
'''simple docstring'''
return self.f_cost < other.f_cost
class lowerCAmelCase__ :
"""simple docstring"""
def __init__( self : Optional[int] , A__ : TPosition , A__ : TPosition ) -> Optional[Any]:
'''simple docstring'''
a__ : int = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , A__ )
a__ : Dict = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , A__ )
a__ : Dict = [self.start]
a__ : list[Node] = []
a__ : str = False
def __lowerCAmelCase ( self : List[str] ) -> list[TPosition]:
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
a__ : Dict = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(A__ )
self.closed_nodes.append(A__ )
a__ : List[Any] = self.get_successors(A__ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(A__ )
else:
# retrieve the best current path
a__ : Optional[int] = self.open_nodes.pop(self.open_nodes.index(A__ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(A__ )
else:
self.open_nodes.append(A__ )
return [self.start.pos]
def __lowerCAmelCase ( self : Optional[Any] , A__ : Node ) -> list[Node]:
'''simple docstring'''
a__ : Optional[int] = []
for action in delta:
a__ : List[Any] = parent.pos_x + action[1]
a__ : Tuple = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(A__ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
A__ , A__ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , A__ , ) )
return successors
def __lowerCAmelCase ( self : List[Any] , A__ : Node | None ) -> list[TPosition]:
'''simple docstring'''
a__ : Union[str, Any] = node
a__ : Optional[Any] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
a__ : Any = current_node.parent
path.reverse()
return path
class lowerCAmelCase__ :
"""simple docstring"""
def __init__( self : List[Any] , A__ : TPosition , A__ : TPosition ) -> None:
'''simple docstring'''
a__ : str = AStar(A__ , A__ )
a__ : Optional[int] = AStar(A__ , A__ )
a__ : List[str] = False
def __lowerCAmelCase ( self : Tuple ) -> list[TPosition]:
'''simple docstring'''
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
a__ : int = self.fwd_astar.open_nodes.pop(0 )
a__ : List[Any] = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
A__ , A__ )
self.fwd_astar.closed_nodes.append(A__ )
self.bwd_astar.closed_nodes.append(A__ )
a__ : Tuple = current_bwd_node
a__ : Optional[int] = current_fwd_node
a__ : Optional[int] = {
self.fwd_astar: self.fwd_astar.get_successors(A__ ),
self.bwd_astar: self.bwd_astar.get_successors(A__ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(A__ )
else:
# retrieve the best current path
a__ : Optional[Any] = astar.open_nodes.pop(
astar.open_nodes.index(A__ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(A__ )
else:
astar.open_nodes.append(A__ )
return [self.fwd_astar.start.pos]
def __lowerCAmelCase ( self : List[str] , A__ : Node , A__ : Node ) -> list[TPosition]:
'''simple docstring'''
a__ : str = self.fwd_astar.retrace_path(A__ )
a__ : List[str] = self.bwd_astar.retrace_path(A__ )
bwd_path.pop()
bwd_path.reverse()
a__ : Optional[int] = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
__SCREAMING_SNAKE_CASE = (0, 0)
__SCREAMING_SNAKE_CASE = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__SCREAMING_SNAKE_CASE = time.time()
__SCREAMING_SNAKE_CASE = AStar(init, goal)
__SCREAMING_SNAKE_CASE = a_star.search()
__SCREAMING_SNAKE_CASE = time.time() - start_time
print(f'AStar execution time = {end_time:f} seconds')
__SCREAMING_SNAKE_CASE = time.time()
__SCREAMING_SNAKE_CASE = BidirectionalAStar(init, goal)
__SCREAMING_SNAKE_CASE = time.time() - bd_start_time
print(f'BidirectionalAStar execution time = {bd_end_time:f} seconds')
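# Heuristic sketch: with HEURISTIC = 1 the node cost is the Manhattan distance
# |dx| + |dy|; with HEURISTIC = 0 it is the Euclidean distance sqrt(dx**2 + dy**2).
# For the start (0, 0) and goal (6, 6) above those values are 12 and about 8.49.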
| 688 | 0 |
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
__magic_name__: List[str] = False
__magic_name__: Dict = True
__magic_name__: str = False
if __name__ == "__main__":
__magic_name__: Any = argparse.ArgumentParser()
parser.add_argument(
"--repo_path",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
__magic_name__: List[Any] = parser.parse_args()
__magic_name__: Union[str, Any] = {
"image_size": "sample_size",
"num_res_blocks": "layers_per_block",
"block_channels": "block_out_channels",
"down_blocks": "down_block_types",
"up_blocks": "up_block_types",
"downscale_freq_shift": "freq_shift",
"resnet_num_groups": "norm_num_groups",
"resnet_act_fn": "act_fn",
"resnet_eps": "norm_eps",
"num_head_channels": "attention_head_dim",
}
__magic_name__: Optional[Any] = {
"time_steps": "time_proj",
"mid": "mid_block",
"downsample_blocks": "down_blocks",
"upsample_blocks": "up_blocks",
}
__magic_name__: List[str] = "" if has_file(args.repo_path, "config.json") else "unet"
with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
__magic_name__: List[str] = reader.read()
__magic_name__: str = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, "config.json"):
__magic_name__: Optional[int] = UNetaDModel(**config)
else:
__magic_name__: Optional[Any] = UNetaDConditionModel if "ldm-text2im-large-256" in args.repo_path else UNetaDModel
__magic_name__: List[str] = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
__magic_name__: int = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
__magic_name__: List[Any] = config[key]
del config[key]
__magic_name__: Union[str, Any] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
__magic_name__: Dict = [k.replace("UNetRes", "") for k in config["up_block_types"]]
if do_only_weights:
__magic_name__: Tuple = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))
__magic_name__: Union[str, Any] = {}
for param_key, param_value in state_dict.items():
if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
continue
__magic_name__: int = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split(".")[0] == key:
__magic_name__: str = param_value
__magic_name__: List[str] = True
if not has_changed:
__magic_name__: List[str] = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
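# Example invocation (script name and paths are placeholders):
# python convert_unet_config.py --repo_path ./old-unet-repo --dump_path ./converted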
| 324 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def __a ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Union[str, Any] ):
# Construct model
if gpta_config_file == "":
a__ : Union[str, Any] = GPTaConfig()
else:
a__ : Dict = GPTaConfig.from_json_file(lowerCAmelCase__ )
a__ : Optional[int] = GPTaModel(lowerCAmelCase__ )
# Load weights from numpy
load_tf_weights_in_gpta(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save pytorch-model
a__ : int = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
a__ : Union[str, Any] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
torch.save(model.state_dict() , lowerCAmelCase__ )
print(F'Save configuration file to {pytorch_config_dump_path}' )
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 688 | 0 |
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def lowerCamelCase_(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = False ) -> Dict:
if radian_mode:
return [magnitude * cos(lowerCAmelCase__ ), magnitude * sin(lowerCAmelCase__ )]
return [magnitude * cos(radians(lowerCAmelCase__ ) ), magnitude * sin(radians(lowerCAmelCase__ ) )]
def lowerCamelCase_(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 10**-1 ) -> List[Any]:
UpperCAmelCase = cross(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase = sum(lowerCAmelCase__ )
return abs(lowerCAmelCase__ ) < eps
if __name__ == "__main__":
# Test to check if it works
__lowerCamelCase : int = array(
[
polar_force(718.4, 180 - 30),
polar_force(879.54, 45),
polar_force(100, -90),
]
)
__lowerCamelCase : Union[str, Any] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
__lowerCamelCase : List[str] = array(
[
polar_force(30 * 9.81, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
__lowerCamelCase : List[str] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
__lowerCamelCase : Optional[Any] = array([[0, -2_000], [0, -1_200], [0, 15_600], [0, -12_400]])
__lowerCamelCase : int = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
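# Worked check of polar_force: a 10 N force at 90 degrees resolves to the
# components (0, 10); in floating point the x term comes out as ~6.1e-16
# rather than exactly 0, which is why in_static_equilibrium uses an eps.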
| 323 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
'--repo_path',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
__SCREAMING_SNAKE_CASE = parser.parse_args()
__SCREAMING_SNAKE_CASE = {
'image_size': 'sample_size',
'num_res_blocks': 'layers_per_block',
'block_channels': 'block_out_channels',
'down_blocks': 'down_block_types',
'up_blocks': 'up_block_types',
'downscale_freq_shift': 'freq_shift',
'resnet_num_groups': 'norm_num_groups',
'resnet_act_fn': 'act_fn',
'resnet_eps': 'norm_eps',
'num_head_channels': 'attention_head_dim',
}
__SCREAMING_SNAKE_CASE = {
'time_steps': 'time_proj',
'mid': 'mid_block',
'downsample_blocks': 'down_blocks',
'upsample_blocks': 'up_blocks',
}
__SCREAMING_SNAKE_CASE = '' if has_file(args.repo_path, 'config.json') else 'unet'
with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader:
__SCREAMING_SNAKE_CASE = reader.read()
__SCREAMING_SNAKE_CASE = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, 'config.json'):
__SCREAMING_SNAKE_CASE = UNetaDModel(**config)
else:
__SCREAMING_SNAKE_CASE = UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel
__SCREAMING_SNAKE_CASE = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
__SCREAMING_SNAKE_CASE = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
__SCREAMING_SNAKE_CASE = config[key]
del config[key]
__SCREAMING_SNAKE_CASE = [k.replace('UNetRes', '') for k in config['down_block_types']]
__SCREAMING_SNAKE_CASE = [k.replace('UNetRes', '') for k in config['up_block_types']]
if do_only_weights:
__SCREAMING_SNAKE_CASE = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin'))
__SCREAMING_SNAKE_CASE = {}
for param_key, param_value in state_dict.items():
if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'):
continue
__SCREAMING_SNAKE_CASE = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('.')[0] == key:
__SCREAMING_SNAKE_CASE = param_value
__SCREAMING_SNAKE_CASE = True
if not has_changed:
__SCREAMING_SNAKE_CASE = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 688 | 0 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __a ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = ["image_processor", "tokenizer"]
SCREAMING_SNAKE_CASE = "BlipImageProcessor"
SCREAMING_SNAKE_CASE = "AutoTokenizer"
def __init__( self : Dict , snake_case_ : Tuple , snake_case_ : str)-> str:
__lowerCAmelCase =False
super().__init__(A__ , A__)
__lowerCAmelCase =self.image_processor
def __call__( self : Optional[int] , snake_case_ : ImageInput = None , snake_case_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , snake_case_ : bool = True , snake_case_ : Union[bool, str, PaddingStrategy] = False , snake_case_ : Union[bool, str, TruncationStrategy] = None , snake_case_ : Optional[int] = None , snake_case_ : int = 0 , snake_case_ : Optional[int] = None , snake_case_ : Optional[bool] = None , snake_case_ : bool = False , snake_case_ : bool = False , snake_case_ : bool = False , snake_case_ : bool = False , snake_case_ : bool = False , snake_case_ : bool = True , snake_case_ : Optional[Union[str, TensorType]] = None , **snake_case_ : List[Any] , )-> BatchEncoding:
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""")
# Get only text
if images is None:
__lowerCAmelCase =self.tokenizer
__lowerCAmelCase =self.tokenizer(
text=A__ , add_special_tokens=A__ , padding=A__ , truncation=A__ , max_length=A__ , stride=A__ , pad_to_multiple_of=A__ , return_attention_mask=A__ , return_overflowing_tokens=A__ , return_special_tokens_mask=A__ , return_offsets_mapping=A__ , return_token_type_ids=A__ , return_length=A__ , verbose=A__ , return_tensors=A__ , **A__ , )
return text_encoding
# add pixel_values
__lowerCAmelCase =self.image_processor(A__ , return_tensors=A__)
if text is not None:
__lowerCAmelCase =self.tokenizer(
text=A__ , add_special_tokens=A__ , padding=A__ , truncation=A__ , max_length=A__ , stride=A__ , pad_to_multiple_of=A__ , return_attention_mask=A__ , return_overflowing_tokens=A__ , return_special_tokens_mask=A__ , return_offsets_mapping=A__ , return_token_type_ids=A__ , return_length=A__ , verbose=A__ , return_tensors=A__ , **A__ , )
else:
__lowerCAmelCase =None
if text_encoding is not None:
encoding_image_processor.update(A__)
return encoding_image_processor
def UpperCamelCase ( self : int , *snake_case_ : Optional[Any] , **snake_case_ : Union[str, Any])-> List[Any]:
return self.tokenizer.batch_decode(*A__ , **A__)
def UpperCamelCase ( self : List[str] , *snake_case_ : List[Any] , **snake_case_ : Union[str, Any])-> Any:
return self.tokenizer.decode(*A__ , **A__)
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def UpperCamelCase ( self : int)-> Optional[Any]:
__lowerCAmelCase =self.tokenizer.model_input_names
__lowerCAmelCase =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
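# Minimal usage sketch, assuming this class mirrors transformers'
# Blip2Processor (checkpoint name is illustrative):
#
# from PIL import Image
# from transformers import Blip2Processor
# processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
# inputs = processor(images=Image.new("RGB", (224, 224)), text="a photo of",
#                    return_tensors="pt")
# # inputs carries pixel_values plus input_ids / attention_mask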
| 354 |
'''simple docstring'''
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCamelCase = (KDPMaDiscreteScheduler,)
__UpperCamelCase = 10
def __lowerCAmelCase ( self : Optional[Any] , **A__ : Optional[int] ) -> int:
'''simple docstring'''
a__ : Optional[int] = {
'''num_train_timesteps''': 1_1_0_0,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**A__ )
return config
def __lowerCAmelCase ( self : List[Any] ) -> str:
'''simple docstring'''
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=A__ )
def __lowerCAmelCase ( self : List[str] ) -> List[str]:
'''simple docstring'''
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=A__ , beta_end=A__ )
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=A__ )
def __lowerCAmelCase ( self : str ) -> List[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A__ )
def __lowerCAmelCase ( self : str ) -> Optional[int]:
'''simple docstring'''
a__ : Any = self.scheduler_classes[0]
a__ : str = self.get_scheduler_config(prediction_type='''v_prediction''' )
a__ : Dict = scheduler_class(**A__ )
scheduler.set_timesteps(self.num_inference_steps )
a__ : Tuple = self.dummy_model()
a__ : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
a__ : Dict = sample.to(A__ )
for i, t in enumerate(scheduler.timesteps ):
a__ : Optional[Any] = scheduler.scale_model_input(A__ , A__ )
a__ : Union[str, Any] = model(A__ , A__ )
a__ : List[str] = scheduler.step(A__ , A__ , A__ )
a__ : Optional[Any] = output.prev_sample
a__ : Tuple = torch.sum(torch.abs(A__ ) )
a__ : Optional[int] = torch.mean(torch.abs(A__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6934E-07 ) < 1E-2
assert abs(result_mean.item() - 6.1112E-10 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.693_4286_5017_0972E-07 ) < 1E-2
assert abs(result_mean.item() - 0.0_002 ) < 1E-3
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
if torch_device == "mps":
return
a__ : List[Any] = self.scheduler_classes[0]
a__ : Tuple = self.get_scheduler_config()
a__ : Tuple = scheduler_class(**A__ )
scheduler.set_timesteps(self.num_inference_steps )
a__ : List[Any] = self.dummy_model()
a__ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
a__ : Any = sample.to(A__ )
for i, t in enumerate(scheduler.timesteps ):
a__ : str = scheduler.scale_model_input(A__ , A__ )
a__ : List[str] = model(A__ , A__ )
a__ : str = scheduler.step(A__ , A__ , A__ )
a__ : List[Any] = output.prev_sample
a__ : Dict = torch.sum(torch.abs(A__ ) )
a__ : Optional[Any] = torch.mean(torch.abs(A__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
def __lowerCAmelCase ( self : str ) -> int:
'''simple docstring'''
if torch_device == "mps":
return
a__ : Optional[int] = self.scheduler_classes[0]
a__ : Tuple = self.get_scheduler_config()
a__ : List[Any] = scheduler_class(**A__ )
scheduler.set_timesteps(self.num_inference_steps , device=A__ )
a__ : Union[str, Any] = self.dummy_model()
a__ : List[Any] = self.dummy_sample_deter.to(A__ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
a__ : Optional[int] = scheduler.scale_model_input(A__ , A__ )
a__ : List[Any] = model(A__ , A__ )
a__ : Any = scheduler.step(A__ , A__ , A__ )
a__ : List[str] = output.prev_sample
a__ : Any = torch.sum(torch.abs(A__ ) )
a__ : Union[str, Any] = torch.mean(torch.abs(A__ ) )
if str(A__ ).startswith('''cpu''' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
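# Standalone sketch of the sampling loop under test, using the public
# diffusers scheduler API (a zero tensor stands in for the UNet prediction):
#
# import torch
# from diffusers import KDPM2DiscreteScheduler
# sched = KDPM2DiscreteScheduler(num_train_timesteps=1100)
# sched.set_timesteps(10)
# sample = torch.randn(1, 3, 8, 8) * sched.init_noise_sigma
# for t in sched.timesteps:
#     scaled = sched.scale_model_input(sample, t)
#     model_out = torch.zeros_like(scaled)
#     sample = sched.step(model_out, t, sample).prev_sample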
| 688 | 0 |
"""simple docstring"""
from __future__ import annotations
import os
from typing import Any
import requests
lowercase_ : Optional[Any] = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
lowercase_ : List[str] = BASE_URL + '''/user'''
# https://github.com/settings/tokens
lowercase_ : Any = os.environ.get('''USER_TOKEN''', '''''')
def _lowerCAmelCase ( lowerCamelCase__ : str ) -> Tuple:
_SCREAMING_SNAKE_CASE : List[Any] = {
'''Authorization''': f'''token {auth_token}''',
'''Accept''': '''application/vnd.github.v3+json''',
}
return requests.get(lowerCAmelCase__, headers=lowerCAmelCase__ ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'{key}: {value}')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
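# Note: the authenticated /user endpoint returns account fields such as
# "login" and "id"; a token without extra scopes is enough for this call.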
| 572 |
'''simple docstring'''
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
a__ : str = ['''a''', '''b''', '''c''']
# Defaults to last layer if both are None
a__ , a__ : List[Any] = get_aligned_output_features_output_indices(A__ , A__ , A__ )
self.assertEqual(A__ , ['''c'''] )
self.assertEqual(A__ , [2] )
# Out indices set to match out features
a__ , a__ : Optional[int] = get_aligned_output_features_output_indices(['''a''', '''c'''] , A__ , A__ )
self.assertEqual(A__ , ['''a''', '''c'''] )
self.assertEqual(A__ , [0, 2] )
# Out features set to match out indices
a__ , a__ : int = get_aligned_output_features_output_indices(A__ , [0, 2] , A__ )
self.assertEqual(A__ , ['''a''', '''c'''] )
self.assertEqual(A__ , [0, 2] )
# Out features selected from negative indices
a__ , a__ : List[str] = get_aligned_output_features_output_indices(A__ , [-3, -1] , A__ )
self.assertEqual(A__ , ['''a''', '''c'''] )
self.assertEqual(A__ , [-3, -1] )
def __lowerCAmelCase ( self : str ) -> List[Any]:
'''simple docstring'''
with self.assertRaises(A__ ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , A__ )
# Out features must be a list
with self.assertRaises(A__ ):
verify_out_features_out_indices(('''a''', '''b''') , (0, 1) , ['''a''', '''b'''] )
# Out features must be a subset of stage names
with self.assertRaises(A__ ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , ['''a'''] )
# Out indices must be a list or tuple
with self.assertRaises(A__ ):
verify_out_features_out_indices(A__ , 0 , ['''a''', '''b'''] )
# Out indices must be a subset of stage names
with self.assertRaises(A__ ):
verify_out_features_out_indices(A__ , (0, 1) , ['''a'''] )
# Out features and out indices must be the same length
with self.assertRaises(A__ ):
verify_out_features_out_indices(['''a''', '''b'''] , (0,) , ['''a''', '''b''', '''c'''] )
# Out features should match out indices
with self.assertRaises(A__ ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 2) , ['''a''', '''b''', '''c'''] )
# Out features and out indices should be in order
with self.assertRaises(A__ ):
verify_out_features_out_indices(['''b''', '''a'''] , (0, 1) , ['''a''', '''b'''] )
# Check passes with valid inputs
verify_out_features_out_indices(['''a''', '''b''', '''d'''] , (0, 1, -1) , ['''a''', '''b''', '''c''', '''d'''] )
def __lowerCAmelCase ( self : Dict ) -> int:
'''simple docstring'''
a__ : Optional[Any] = BackboneMixin()
a__ : int = ['''a''', '''b''', '''c''']
a__ : List[Any] = ['''a''', '''c''']
a__ : Tuple = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ['''a''', '''c'''] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
a__ : Dict = ['''a''', '''b''']
self.assertEqual(backbone.out_features , ['''a''', '''b'''] )
self.assertEqual(backbone.out_indices , [0, 1] )
a__ : int = [-3, -1]
self.assertEqual(backbone.out_features , ['''a''', '''c'''] )
self.assertEqual(backbone.out_indices , [-3, -1] )
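# Behavior summary of get_aligned_output_features_output_indices for the stage
# names ["a", "b", "c"], as exercised above:
# (None, None)        -> (["c"], [2])           last stage by default
# (["a", "c"], None)  -> (["a", "c"], [0, 2])   indices aligned to features
# (None, [0, 2])      -> (["a", "c"], [0, 2])   features aligned to indices
# (None, [-3, -1])    -> (["a", "c"], [-3, -1]) negative indices kept as given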
| 688 | 0 |
'''simple docstring'''
from math import pi
def __lowerCamelCase ( UpperCAmelCase_ , UpperCAmelCase_ ) ->Tuple:
return 2 * pi * radius * (angle / 3_60)
if __name__ == "__main__":
print(arc_length(90, 10))
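# Worked example: a 90-degree arc of a circle with radius 10 has length
# 2 * pi * 10 * (90 / 360) = 5 * pi ≈ 15.708, matching the call above.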
| 368 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def __a ( lowerCAmelCase__ : List[Any] ):
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() )
def __a ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Any ):
a__ : Dict = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
a__ : Any = key.replace('''heads.cmd.mim_head.cls.predictions''' , '''mmm_image_head''' )
a__ : int = key.replace('''heads.cmd.mlm_head.cls.predictions''' , '''mmm_text_head''' )
a__ : Optional[Any] = key.replace('''heads.cmd.itm_head.cls''' , '''itm_head''' )
a__ : Dict = key.replace('''heads.cmd.itm_head.pooler''' , '''itm_head.pooler''' )
a__ : List[str] = key.replace('''heads.cmd.clip_head.logit_scale''' , '''flava.logit_scale''' )
a__ : List[Any] = key.replace('''heads.fairseq_mlm.cls.predictions''' , '''mlm_head''' )
a__ : str = key.replace('''heads.imagenet.mim_head.cls.predictions''' , '''mim_head''' )
a__ : List[Any] = key.replace('''mm_text_projection''' , '''flava.text_to_mm_projection''' )
a__ : List[Any] = key.replace('''mm_image_projection''' , '''flava.image_to_mm_projection''' )
a__ : str = key.replace('''image_encoder.module''' , '''flava.image_model''' )
a__ : Dict = key.replace('''text_encoder.module''' , '''flava.text_model''' )
a__ : List[Any] = key.replace('''mm_encoder.module.encoder.cls_token''' , '''flava.multimodal_model.cls_token''' )
a__ : List[str] = key.replace('''mm_encoder.module''' , '''flava.multimodal_model''' )
a__ : List[str] = key.replace('''text_projection''' , '''flava.text_projection''' )
a__ : Any = key.replace('''image_projection''' , '''flava.image_projection''' )
a__ : Any = value.float()
for key, value in codebook_state_dict.items():
a__ : List[str] = value
return upgrade
@torch.no_grad()
def __a ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict=None ):
if config_path is not None:
a__ : Tuple = FlavaConfig.from_pretrained(lowerCAmelCase__ )
else:
a__ : Optional[int] = FlavaConfig()
a__ : List[Any] = FlavaForPreTraining(lowerCAmelCase__ ).eval()
a__ : Optional[int] = convert_dalle_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , save_checkpoint=lowerCAmelCase__ )
if os.path.exists(lowerCAmelCase__ ):
a__ : List[str] = torch.load(lowerCAmelCase__ , map_location='''cpu''' )
else:
a__ : Dict = torch.hub.load_state_dict_from_url(lowerCAmelCase__ , map_location='''cpu''' )
a__ : List[Any] = upgrade_state_dict(lowerCAmelCase__ , lowerCAmelCase__ )
hf_model.load_state_dict(lowerCAmelCase__ )
a__ : Any = hf_model.state_dict()
a__ : Optional[Any] = count_parameters(lowerCAmelCase__ )
a__ : int = count_parameters(lowerCAmelCase__ ) + count_parameters(lowerCAmelCase__ )
assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 )
hf_model.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
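# Example invocation (script name and paths are placeholders):
# python convert_flava_checkpoint.py \
#     --checkpoint_path flava.pt --codebook_path codebook.pt \
#     --pytorch_dump_folder_path ./flava-hf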
| 688 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
a_ = {
'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ErnieForCausalLM',
'ErnieForMaskedLM',
'ErnieForMultipleChoice',
'ErnieForNextSentencePrediction',
'ErnieForPreTraining',
'ErnieForQuestionAnswering',
'ErnieForSequenceClassification',
'ErnieForTokenClassification',
'ErnieModel',
'ErniePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 25 |
'''simple docstring'''
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
__SCREAMING_SNAKE_CASE = 4
__SCREAMING_SNAKE_CASE = 3
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
pass
def __a ( lowerCAmelCase__ : List[str] ):
for shard in shards:
for i in range(lowerCAmelCase__ ):
yield {"i": i, "shard": shard}
def __a ( ):
a__ : str = int(os.environ['''RANK'''] )
a__ : int = int(os.environ['''WORLD_SIZE'''] )
a__ : str = ArgumentParser()
parser.add_argument('''--streaming''' , type=lowerCAmelCase__ )
parser.add_argument('''--local_rank''' , type=lowerCAmelCase__ )
parser.add_argument('''--num_workers''' , type=lowerCAmelCase__ , default=0 )
a__ : int = parser.parse_args()
a__ : List[str] = args.streaming
a__ : Dict = args.num_workers
a__ : Dict = {'''shards''': [F'shard_{shard_idx}' for shard_idx in range(lowerCAmelCase__ )]}
a__ : Tuple = IterableDataset.from_generator(lowerCAmelCase__ , gen_kwargs=lowerCAmelCase__ )
if not streaming:
a__ : str = Dataset.from_list(list(lowerCAmelCase__ ) )
a__ : Optional[int] = split_dataset_by_node(lowerCAmelCase__ , rank=lowerCAmelCase__ , world_size=lowerCAmelCase__ )
a__ : Dict = torch.utils.data.DataLoader(lowerCAmelCase__ , num_workers=lowerCAmelCase__ )
a__ : str = NUM_SHARDS * NUM_ITEMS_PER_SHARD
a__ : Dict = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
a__ : str = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(F'local_size {local_size} != expected_local_size {expected_local_size}' )
if __name__ == "__main__":
main()
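# Example launch (torchrun exports RANK and WORLD_SIZE for each process;
# the script name is a placeholder):
# torchrun --nproc_per_node=2 test_distributed_split.py --streaming True --num_workers 0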
| 688 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( lowerCAmelCase_ , unittest.TestCase ):
UpperCamelCase =LayoutLMTokenizer
UpperCamelCase =LayoutLMTokenizerFast
UpperCamelCase =True
UpperCamelCase =True
def _lowerCamelCase ( self ) -> Any:
super().setUp()
__lowercase : Optional[Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__lowercase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def _lowerCamelCase ( self , **UpperCamelCase_ ) -> Tuple:
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **A__ )
def _lowerCamelCase ( self , UpperCamelCase_ ) -> str:
__lowercase : List[str] = '''UNwant\u00E9d,running'''
__lowercase : str = '''unwanted, running'''
return input_text, output_text
def _lowerCamelCase ( self ) -> Any:
__lowercase : Tuple = self.tokenizer_class(self.vocab_file )
__lowercase : List[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(A__ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , [7, 4, 5, 10, 8, 9] )
def _lowerCamelCase ( self ) -> Union[str, Any]:
pass
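    # Worked check of the WordPiece vocab written in setUp (ids follow file
    # order: [UNK]=0, [CLS]=1, ..., low=11, lowest=12), so "UNwant\u00E9d,running"
    # tokenizes to [un, ##want, ##ed, ",", runn, ##ing] -> ids [7, 4, 5, 10, 8, 9].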
| 76 |
'''simple docstring'''
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
__SCREAMING_SNAKE_CASE = open # noqa: we just need to have a builtin inside this module to test it properly
| 688 | 0 |
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
a : Optional[Any] = logging.getLogger(__name__)
def __UpperCAmelCase ( _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] ) -> Optional[Any]:
# save results
if os.path.exists(lowerCAmelCase__ ):
if os.path.exists(os.path.join(lowerCAmelCase__ , "config.json" ) ) and os.path.isfile(
os.path.join(lowerCAmelCase__ , "config.json" ) ):
os.remove(os.path.join(lowerCAmelCase__ , "config.json" ) )
if os.path.exists(os.path.join(lowerCAmelCase__ , "pytorch_model.bin" ) ) and os.path.isfile(
os.path.join(lowerCAmelCase__ , "pytorch_model.bin" ) ):
os.remove(os.path.join(lowerCAmelCase__ , "pytorch_model.bin" ) )
else:
os.makedirs(lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : Any=False ) -> Optional[Any]:
__snake_case = 2
if unlogit:
__snake_case = torch.pow(lowerCAmelCase__ , lowerCAmelCase__ )
__snake_case = p * torch.log(lowerCAmelCase__ )
__snake_case = 0
return -plogp.sum(dim=-1 )
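# Worked check of the entropy helper: for a uniform row p = [0.5, 0.5],
# -sum(p * log(p)) = log(2) ≈ 0.6931; with unlogit=True, p is squared
# (exponent 2, as set above) before the entropy is taken.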
def __UpperCAmelCase ( _UpperCAmelCase : str ) -> Optional[Any]:
logger.info("lv, h >\t" + "\t".join(F'''{x + 1}''' for x in range(len(lowerCAmelCase__ ) ) ) )
for row in range(len(lowerCAmelCase__ ) ):
if tensor.dtype != torch.long:
logger.info(F'''layer {row + 1}:\t''' + "\t".join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) )
else:
logger.info(F'''layer {row + 1}:\t''' + "\t".join(F'''{x:d}''' for x in tensor[row].cpu().data ) )
def __UpperCAmelCase ( _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : int=None , _UpperCAmelCase : Any=False ) -> List[str]:
__snake_case = model.config.num_hidden_layers, model.config.num_attention_heads
__snake_case = torch.zeros(lowerCAmelCase__ , lowerCAmelCase__ ).to(args.device )
__snake_case = torch.zeros(lowerCAmelCase__ , lowerCAmelCase__ ).to(args.device )
if head_mask is None:
__snake_case = torch.ones(lowerCAmelCase__ , lowerCAmelCase__ ).to(args.device )
head_mask.requires_grad_(requires_grad=lowerCAmelCase__ )
    # If the attention heads were actually pruned, set the head mask to None to avoid a shape mismatch
if actually_pruned:
__snake_case = None
__snake_case = 0.0
__snake_case = 0.0
for step, inputs in enumerate(tqdm(lowerCAmelCase__ , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ):
__snake_case = tuple(t.to(args.device ) for t in inputs )
(__snake_case ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
__snake_case = model(lowerCAmelCase__ , labels=lowerCAmelCase__ , head_mask=lowerCAmelCase__ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
__snake_case = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(lowerCAmelCase__ ):
__snake_case = entropy(attn.detach() , lowerCAmelCase__ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(lowerCAmelCase__ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
__snake_case = 2
__snake_case = torch.pow(torch.pow(lowerCAmelCase__ , lowerCAmelCase__ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-2_0
if not args.dont_normalize_global_importance:
__snake_case = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info("Attention entropies" )
print_ad_tensor(lowerCAmelCase__ )
if compute_importance:
logger.info("Head importance scores" )
print_ad_tensor(lowerCAmelCase__ )
logger.info("Head ranked by importance scores" )
__snake_case = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
__snake_case = torch.arange(
head_importance.numel() , device=args.device )
__snake_case = head_ranks.view_as(lowerCAmelCase__ )
print_ad_tensor(lowerCAmelCase__ )
return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)
    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))
    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]
        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break
        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)
        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score, new_head_mask.sum(), new_head_mask.sum() / new_head_mask.numel() * 100, )
    logger.info("Final head mask")
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())
    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    # Try pruning and test time speedup; pruning is like masking, but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask)
    score_masking = 1 / loss
    original_time = datetime.now() - before_time
    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=None, actually_pruned=True, )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time
    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)", original_num_params, pruned_num_params, pruned_num_params / original_num_params * 100, )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task.", )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", )
    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name_or_path", )
    parser.add_argument(
        "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name_or_path", )
    parser.add_argument(
        "--cache_dir", default=None, type=str, help="Where do you want to store the pre-trained models downloaded from s3", )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances.")
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory")
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers")
    parser.add_argument(
        "--dont_normalize_global_importance", action="store_true", help="Don't normalize all importance scores between 0 and 1", )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask heads until a threshold of accuracy.")
    parser.add_argument(
        "--masking_threshold", default=0.9, type=float, help="masking threshold in terms of metrics (stop masking when metric < threshold * original metric value).", )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount of heads to mask at each masking step.")
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length", default=128, type=int, help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ), )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend
    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)
    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)
    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)
    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)
    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
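# A hypothetical invocation sketch (script name and paths are assumptions; the
# data file is expected to hold token ids loadable with np.loadtxt):
#
#   python run_prune_gpt.py \
#       --model_name_or_path gpt2 \
#       --data_dir ./data/token_ids.txt \
#       --output_dir ./pruning_output \
#       --try_masking --masking_threshold 0.9 --masking_amount 0.1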
| 69 |
'''simple docstring'''
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'}
class Direction(enum.Enum):
    UP = 0
    DOWN = 1
def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()
def writeColor(content, color, end=""):
    forceWrite(f'\u001b[{color}m{content}\u001b[0m', end)
def reset_cursor():
    forceWrite('\r')
def move_cursor(num_lines: int, direction: str):
    forceWrite(f'\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}')
def clear_line():
    forceWrite(' ' * TERMINAL_WIDTH)
    reset_cursor()
def linebreak():
    reset_cursor()
    forceWrite('-' * TERMINAL_WIDTH)
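# A minimal usage sketch for the helpers above (assumes an ANSI-capable
# terminal; 32 is the ANSI color code for green):
if __name__ == "__main__":
    forceWrite("Loading", end="")
    writeColor(" OK", 32, end="\n")
    linebreak()
    forceWrite("\n")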
| 688 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A_ = logging.get_logger(__name__)
class __lowercase ( BaseImageProcessor ):
    model_input_names = ['pixel_values']
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 384, 'width': 384}
        size = get_size_dict(size, default_to_square=True)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}')
        output_size = (size['height'], size['width'])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, do_convert_rgb: bool = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        encoded_outputs = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors)
        return encoded_outputs
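# A minimal usage sketch (the defaults above resize to 384x384, rescale by
# 1/255 and normalize with the CLIP mean/std; the synthetic image is an
# assumption for illustration only):
if __name__ == "__main__" and is_vision_available():
    example_image = PIL.Image.new("RGB", (500, 400), color=(120, 60, 30))
    processor = __lowercase()
    outputs = processor.preprocess(images=example_image, return_tensors="np")
    print(outputs["pixel_values"].shape)  # expected: (1, 3, 384, 384)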
| 604 |
'''simple docstring'''
import inspect
import unittest
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Dict ) -> Dict:
'''simple docstring'''
try:
import diffusers # noqa: F401
except ImportError:
assert False
def __lowerCAmelCase ( self : int ) -> str:
'''simple docstring'''
import diffusers
from diffusers.dependency_versions_table import deps
        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f'{backend} is not in the deps table!'
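# For reference, the module-name -> pip-name special cases above as a plain
# mapping (a sketch mirroring the branches, not used by the test itself):
_BACKEND_TO_PIP_NAME = {
    "k_diffusion": "k-diffusion",
    "invisible_watermark": "invisible-watermark",
}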
| 688 | 0 |
import numpy as np
a =[
["""a""", """b""", """c""", """d""", """e"""],
["""f""", """g""", """h""", """i""", """k"""],
["""l""", """m""", """n""", """o""", """p"""],
["""q""", """r""", """s""", """t""", """u"""],
["""v""", """w""", """x""", """y""", """z"""],
]
class A_ :
    def __init__(self):
        self.SQUARE = np.array(a)
    def letter_to_numbers(self, letter: str):
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes
    def numbers_to_letter(self, index1: int, index2: int):
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter
    def encode(self, message: str):
        message = message.lower()
        message = message.replace(' ', '')
        message = message.replace('j', 'i')
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ''
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter
        return encoded_message
    def decode(self, message: str):
        message = message.lower()
        message = message.replace(' ', '')
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index] = numbers[0]
            first_step[letter_index + len(message)] = numbers[1]
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ''
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter
        return decoded_message
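# A minimal usage sketch of the Bifid-style cipher above ("j" is folded into
# "i", so round-trips are exact only for j-free messages):
if __name__ == "__main__":
    bifid = A_()
    ciphertext = bifid.encode("testmessage")
    print(ciphertext)
    print(bifid.decode(ciphertext))  # -> "testmessage"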
| 652 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION['lanczos'])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
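# A quick sanity check of preprocess (hypothetical input): a 100x100 RGB image
# is cropped to the nearest multiple of 32 (96x96) and mapped into [-1, 1]:
#   img = PIL.Image.new("RGB", (100, 100), color=(255, 255, 255))
#   preprocess(img).shape  -> torch.Size([1, 3, 96, 96]), max value 1.0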
class lowerCAmelCase__ ( DiffusionPipeline ):
"""simple docstring"""
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ], ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
@torch.no_grad()
    def __call__(self, image: Union[torch.Tensor, PIL.Image.Image] = None, batch_size: Optional[int] = 1, num_inference_steps: Optional[int] = 100, eta: Optional[float] = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}')
        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)
        height, width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['eta'] = eta
        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
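# A hypothetical end-to-end usage sketch (the checkpoint name is an
# assumption; any LDM super-resolution checkpoint with the same layout works):
#
#   from diffusers import LDMSuperResolutionPipeline
#   pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   upscaled = pipe(low_res_image, num_inference_steps=100, eta=1.0).images[0]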
| 688 | 0 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class snake_case_ ( ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'LayoutLMv2ImageProcessor'
    tokenizer_class = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                'You cannot provide bounding boxes '
                'if you initialized the image processor with apply_ocr set to True.')
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.')
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.')
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['words']
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['words'], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features['boxes'], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel values
        images = features.pop('pixel_values')
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs['overflow_to_sample_mapping'])
        encoded_inputs['image'] = images
        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}")
        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]
    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning, )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning, )
        return self.image_processor
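# A hypothetical usage sketch (model ids are assumptions; with apply_ocr=True
# the image processor supplies the words and boxes for the tokenizer):
#
#   from transformers import LayoutLMv2ImageProcessor, LayoutXLMTokenizer
#   processor = snake_case_(
#       image_processor=LayoutLMv2ImageProcessor(apply_ocr=True),
#       tokenizer=LayoutXLMTokenizer.from_pretrained("microsoft/layoutxlm-base"),
#   )
#   encoding = processor(images=document_image, return_tensors="pt")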
| 375 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
__SCREAMING_SNAKE_CASE = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
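# Quick check of the latent-grid helper above: with the default scale factor 8,
# pixel sizes are rounded up to a multiple of 8**2 = 64, then divided by 8:
#   downscale_height_and_width(512, 512) -> (64, 64)
#   downscale_height_and_width(768, 768) -> (96, 96)
#   downscale_height_and_width(700, 700) -> (88, 88)  # 700 rounds up to 11 * 64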
class lowerCAmelCase__ ( DiffusionPipeline ):
"""simple docstring"""
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel, ):
        super().__init__()
        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}')
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`')
        device = torch.device(f'cuda:{gpu_id}')
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0'):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.')
        device = torch.device(f'cuda:{gpu_id}')
        if self.device.type != "cpu":
            self.to('cpu', silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, '_hf_hook'):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '_hf_hook')
                and hasattr(module._hf_hook, 'execution_device')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
    @replace_example_docstring(__SCREAMING_SNAKE_CASE)
    def __call__(self, image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], hint: torch.FloatTensor, height: int = 512, width: int = 512, num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler, )
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'image_embeds': image_embeds, 'hint': hint}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, 'variance_type')
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)['sample']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}')
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
            if output_type == "pil":
                image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 688 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__magic_name__: Optional[Any] = logging.get_logger(__name__)
__magic_name__: List[Any] = {
"shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class snake_case__ ( BackboneConfigMixin, PretrainedConfig ):
    model_type = 'dinat'
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
    def __init__(self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16], kernel_size=7, dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]], mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-5, layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs, ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
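# A minimal usage sketch (mirrors how HF configs are typically constructed):
if __name__ == "__main__":
    config = snake_case__()
    print(config.model_type)   # "dinat"
    print(config.num_layers)   # 4 stages for the default depths [3, 4, 6, 5]
    print(config.hidden_size)  # 64 * 2**3 = 512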
| 324 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.txt'}
__SCREAMING_SNAKE_CASE = {
'vocab_file': {
'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt',
'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt',
},
}
__SCREAMING_SNAKE_CASE = {
'facebook/esm2_t6_8M_UR50D': 1_0_2_4,
'facebook/esm2_t12_35M_UR50D': 1_0_2_4,
}
def load_vocab_file(vocab_file):
    with open(vocab_file, 'r') as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class lowerCAmelCase__ ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, unk_token="<unk>", cls_token="<cls>", pad_token="<pad>", mask_token="<mask>", eos_token="<eos>", **kwargs, ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)
    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)
    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))
    def _tokenize(self, text, **kwargs):
        return text.split()
    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)
    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}
    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))
    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!')
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token
    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.')
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask
    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt')
        with open(vocab_file, 'w') as f:
            f.write('\n'.join(self.all_tokens))
        return (vocab_file,)
    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)
    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=special_tokens)
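# A minimal usage sketch with a throwaway vocab file (the token set is a
# hypothetical subset of the real ESM vocabulary; ids follow file order):
if __name__ == "__main__":
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
        tmp.write("\n".join(["<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "<mask>"]))
    tokenizer = lowerCAmelCase__(tmp.name)
    print(tokenizer("L A G")["input_ids"])  # expected: [0, 4, 5, 6, 2] for this toy vocab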
| 688 | 0 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
__lowerCamelCase : Dict = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False
    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
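# For reference, a sketch of the environment this check reads (the values are
# illustrative; a True result also requires the `smdistributed` package):
#   SM_HP_MP_PARAMETERS = '{"partitions": 2, "microbatches": 4}'
#   SM_FRAMEWORK_PARAMS = '{"sagemaker_mpi_enabled": true}'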
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class __magic_name__ ( TrainingArguments ):
    mp_parameters : str = field(
        default='', metadata={'help': 'Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'}, )
    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.", FutureWarning, )
    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch")
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401
            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        if device.type == "cuda":
            torch.cuda.set_device(device)
        return device
    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size
    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()
    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
| 323 |
'''simple docstring'''
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
__SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        self.initialized = False
    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
            self.initialized = True
    def init_retrieval(self):
        self.retriever.index.init_index()
    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                'When using Ray for distributed fine-tuning, '
                'you\'ll need to provide the paths instead, '
                'as the dataset and the index are loaded '
                'separately. More info in examples/rag/use_own_knowledge_dataset.py ')
        super().__init__(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ])
    def init_retrieval(self):
        logger.info('initializing retrieval')
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()
    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)
    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop('config', None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = 'custom'
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, retrieval_workers=actor_handles, index=index, )
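# A hypothetical wiring sketch: retrieval workers are Ray actor handles built
# from the RayRetriever class above (the model id is an assumption):
#
#   import ray
#   ray.init()
#   workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
#   retriever = RagRayDistributedRetriever.from_pretrained(
#       "facebook/rag-token-nq", actor_handles=workers
#   )
#   retriever.init_retrieval()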
| 688 | 0 |
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate entropy of a pre-softmax logit tensor."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
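# Sanity check for the entropy helper above: uniform logits over K classes
# give a softmax entropy of log(K).
if __name__ == "__main__":
    uniform_logits = torch.zeros(1, 10)
    print(float(entropy(uniform_logits)))  # ~2.3026 == log(10)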
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]
    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x
    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])
    def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask)
            hidden_states = layer_outputs[0]
            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)
            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output
            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)
                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)
        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"The Bert Model transformer with early exiting (DeeBERT). " , lowerCAmelCase_ , )
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)
        self.init_weights()
    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)
    def get_input_embeddings(self):
        return self.embeddings.word_embeddings
    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value
    def _prune_heads(self, heads_to_prune):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def UpperCamelCase ( self : Union[str, Any] , snake_case_ : Optional[int]=None , snake_case_ : Optional[Any]=None , snake_case_ : Any=None , snake_case_ : str=None , snake_case_ : Optional[int]=None , snake_case_ : Union[str, Any]=None , snake_case_ : List[str]=None , snake_case_ : str=None , )-> Any:
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""")
elif input_ids is not None:
__lowerCAmelCase =input_ids.size()
elif inputs_embeds is not None:
__lowerCAmelCase =inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""")
__lowerCAmelCase =input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__lowerCAmelCase =torch.ones(A__ , device=A__)
if encoder_attention_mask is None:
__lowerCAmelCase =torch.ones(A__ , device=A__)
if token_type_ids is None:
__lowerCAmelCase =torch.zeros(A__ , dtype=torch.long , device=A__)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__lowerCAmelCase =self.get_extended_attention_mask(A__ , A__ , A__)
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
__lowerCAmelCase =encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
__lowerCAmelCase =encoder_attention_mask[:, None, None, :]
__lowerCAmelCase =encoder_extended_attention_mask.to(
dtype=next(self.parameters()).dtype) # fp16 compatibility
        __lowerCAmelCase =(1.0 - encoder_extended_attention_mask) * -10_000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__lowerCAmelCase =self.get_head_mask(A__ , self.config.num_hidden_layers)
__lowerCAmelCase =self.embeddings(
input_ids=A__ , position_ids=A__ , token_type_ids=A__ , inputs_embeds=A__)
__lowerCAmelCase =self.encoder(
A__ , attention_mask=A__ , head_mask=A__ , encoder_hidden_states=A__ , encoder_attention_mask=A__ , )
__lowerCAmelCase =encoder_outputs[0]
__lowerCAmelCase =self.pooler(A__)
__lowerCAmelCase =(
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
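# How the additive attention mask built in forward() behaves, in isolation:
# positions marked 1 become 0.0 (kept) and positions marked 0 become -10000.0,
# which all but removes them after the softmax.
import torch

mask = torch.tensor([[1, 1, 0]])                     # one sequence; last position is padding
extended = mask[:, None, None, :].to(torch.float32)  # broadcastable over heads and queries
extended = (1.0 - extended) * -10000.0
print(extended)  # -0., -0., -10000. (zeros for kept tokens, large negative for padding)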
class __a ( lowerCAmelCase_ ):
def __init__( self : str , snake_case_ : List[Any] , snake_case_ : Dict)-> Tuple:
__lowerCAmelCase =message
__lowerCAmelCase =exit_layer # start from 1!
class __a ( nn.Module ):
def __init__( self : Optional[int] , snake_case_ : List[Any])-> Optional[int]:
super().__init__()
__lowerCAmelCase =BertPooler(A__)
__lowerCAmelCase =nn.Dropout(config.hidden_dropout_prob)
__lowerCAmelCase =nn.Linear(config.hidden_size , config.num_labels)
def UpperCamelCase ( self : List[Any] , snake_case_ : Optional[Any])-> str:
__lowerCAmelCase =encoder_outputs[0]
__lowerCAmelCase =self.pooler(A__)
# "return" pooler_output
# BertModel
__lowerCAmelCase =(pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
__lowerCAmelCase =bmodel_output[1]
__lowerCAmelCase =self.dropout(A__)
__lowerCAmelCase =self.classifier(A__)
return logits, pooled_output
@add_start_docstrings(
"Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. " , lowerCAmelCase_ , )
class __a ( lowerCAmelCase_ ):
def __init__( self : Union[str, Any] , snake_case_ : Optional[Any])-> Dict:
super().__init__(A__)
__lowerCAmelCase =config.num_labels
__lowerCAmelCase =config.num_hidden_layers
__lowerCAmelCase =DeeBertModel(A__)
__lowerCAmelCase =nn.Dropout(config.hidden_dropout_prob)
__lowerCAmelCase =nn.Linear(config.hidden_size , self.config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(A__)
def UpperCamelCase ( self : Any , snake_case_ : Optional[Any]=None , snake_case_ : Optional[int]=None , snake_case_ : Optional[int]=None , snake_case_ : Tuple=None , snake_case_ : Optional[int]=None , snake_case_ : Tuple=None , snake_case_ : Any=None , snake_case_ : List[str]=-1 , snake_case_ : List[str]=False , )-> List[Any]:
__lowerCAmelCase =self.num_layers
try:
__lowerCAmelCase =self.bert(
A__ , attention_mask=A__ , token_type_ids=A__ , position_ids=A__ , head_mask=A__ , inputs_embeds=A__ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
__lowerCAmelCase =outputs[1]
__lowerCAmelCase =self.dropout(A__)
__lowerCAmelCase =self.classifier(A__)
__lowerCAmelCase =(logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__lowerCAmelCase =e.message
__lowerCAmelCase =e.exit_layer
__lowerCAmelCase =outputs[0]
if not self.training:
__lowerCAmelCase =entropy(A__)
__lowerCAmelCase =[]
__lowerCAmelCase =[]
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__lowerCAmelCase =MSELoss()
__lowerCAmelCase =loss_fct(logits.view(-1) , labels.view(-1))
else:
__lowerCAmelCase =CrossEntropyLoss()
__lowerCAmelCase =loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
# work with highway exits
__lowerCAmelCase =[]
for highway_exit in outputs[-1]:
__lowerCAmelCase =highway_exit[0]
if not self.training:
highway_logits_all.append(A__)
highway_entropy.append(highway_exit[2])
if self.num_labels == 1:
# We are doing regression
__lowerCAmelCase =MSELoss()
__lowerCAmelCase =loss_fct(highway_logits.view(-1) , labels.view(-1))
else:
__lowerCAmelCase =CrossEntropyLoss()
__lowerCAmelCase =loss_fct(highway_logits.view(-1 , self.num_labels) , labels.view(-1))
highway_losses.append(A__)
if train_highway:
__lowerCAmelCase =(sum(highway_losses[:-1]),) + outputs
# exclude the final highway, of course
else:
__lowerCAmelCase =(loss,) + outputs
if not self.training:
__lowerCAmelCase =outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__lowerCAmelCase =(
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
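# The entropy() used by the exit criterion is imported from the DeeBERT
# example code; the sketch below reconstructs it (entropy of softmax(x)) and
# should be treated as an assumption rather than part of this file.
import torch


def entropy(x):
    """Entropy of softmax(x) along the last dimension."""
    exp_x = torch.exp(x)
    a = torch.sum(exp_x, dim=-1)
    b = torch.sum(x * exp_x, dim=-1)
    return torch.log(a) - b / a


print(entropy(torch.tensor([[0.0, 0.0]])))  # tensor([0.6931]): maximally uncertain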
| 354 |
'''simple docstring'''
def neville_interpolate(x_points: list, y_points: list, xa: int) -> list:
    """Interpolate and evaluate a polynomial through the given points at xa,
    using Neville's iterated interpolation. Returns the value and the table."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
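# Quick check of the cleaned-up neville_interpolate above: the sample points
# lie on y = x ** 2, so evaluating the interpolant at 5 recovers 25.
value, table = neville_interpolate([1, 2, 3, 4], [1, 4, 9, 16], 5)
print(value)  # 25.0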
| 688 | 0 |
"""simple docstring"""
def is_num_palindrome(num: int) -> bool:
    """Check whether the decimal digits of num read the same forwards and backwards."""
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
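# Example runs of the rewritten checker above:
print(is_num_palindrome(121))   # True
print(is_num_palindrome(123))   # False
print(is_num_palindrome(-101))  # False: negatives are rejected up front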
| 572 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
'caidas/swin2sr-classicalsr-x2-64': (
'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
),
}
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCamelCase = "swin2sr"
__UpperCamelCase = {
"hidden_size": "embed_dim",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Union[str, Any] , A__ : int=6_4 , A__ : List[Any]=1 , A__ : List[Any]=3 , A__ : Any=1_8_0 , A__ : Optional[int]=[6, 6, 6, 6, 6, 6] , A__ : Optional[int]=[6, 6, 6, 6, 6, 6] , A__ : Dict=8 , A__ : Any=2.0 , A__ : Optional[int]=True , A__ : Union[str, Any]=0.0 , A__ : Union[str, Any]=0.0 , A__ : List[str]=0.1 , A__ : Any="gelu" , A__ : Tuple=False , A__ : Optional[int]=0.02 , A__ : List[Any]=1E-5 , A__ : Any=2 , A__ : Union[str, Any]=1.0 , A__ : Dict="1conv" , A__ : Optional[Any]="pixelshuffle" , **A__ : Optional[Any] , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**A__ )
a__ : List[str] = image_size
a__ : Optional[Any] = patch_size
a__ : Dict = num_channels
a__ : Optional[int] = embed_dim
a__ : int = depths
a__ : Optional[int] = len(A__ )
a__ : Dict = num_heads
a__ : List[Any] = window_size
a__ : Optional[int] = mlp_ratio
a__ : Optional[int] = qkv_bias
a__ : Union[str, Any] = hidden_dropout_prob
a__ : Dict = attention_probs_dropout_prob
a__ : Union[str, Any] = drop_path_rate
a__ : int = hidden_act
a__ : int = use_absolute_embeddings
a__ : Dict = layer_norm_eps
a__ : List[str] = initializer_range
a__ : List[Any] = upscale
a__ : List[Any] = img_range
a__ : Optional[int] = resi_connection
a__ : int = upsampler
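# Typical use of this configuration class (published in transformers as
# Swin2SRConfig, assuming a transformers version that ships it): overriding
# one field keeps every other default.
from transformers import Swin2SRConfig

config = Swin2SRConfig(upscale=4)
print(config.model_type)  # swin2sr
print(config.embed_dim, config.upscale)  # 180 4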
| 688 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
a__ : Tuple = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
a__ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
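# The idea behind _LazyModule, reduced to a sketch: resolve an attribute to
# its submodule only on first access. Illustrative only, not the actual
# transformers implementation.
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(attr)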
| 368 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
a__ : int = 0
def __lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
a__ : Optional[int] = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(A__ , A__ )
def __lowerCAmelCase ( self : Dict ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : List[Any] = Path(A__ ) / '''preprocessor_config.json'''
a__ : List[Any] = Path(A__ ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(A__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(A__ , '''w''' ) )
a__ : Any = AutoImageProcessor.from_pretrained(A__ )
self.assertIsInstance(A__ , A__ )
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : int = Path(A__ ) / '''preprocessor_config.json'''
a__ : Optional[Any] = Path(A__ ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(A__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(A__ , '''w''' ) )
a__ : Tuple = AutoImageProcessor.from_pretrained(A__ )
self.assertIsInstance(A__ , A__ )
def __lowerCAmelCase ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : Dict = CLIPConfig()
            # Create a dummy config file with image_processor_type
a__ : int = Path(A__ ) / '''preprocessor_config.json'''
a__ : Optional[int] = Path(A__ ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(A__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(A__ , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
a__ : List[Any] = AutoImageProcessor.from_pretrained(A__ ).to_dict()
config_dict.pop('''image_processor_type''' )
a__ : Union[str, Any] = CLIPImageProcessor(**A__ )
# save in new folder
model_config.save_pretrained(A__ )
config.save_pretrained(A__ )
a__ : Union[str, Any] = AutoImageProcessor.from_pretrained(A__ )
# make sure private variable is not incorrectly saved
a__ : Optional[Any] = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(A__ , A__ )
def __lowerCAmelCase ( self : str ) -> List[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : Optional[int] = Path(A__ ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(A__ , '''w''' ) , )
a__ : Any = AutoImageProcessor.from_pretrained(A__ )
self.assertIsInstance(A__ , A__ )
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
A__ , '''clip-base is not a local folder and is not a valid model identifier''' ):
a__ : str = AutoImageProcessor.from_pretrained('''clip-base''' )
def __lowerCAmelCase ( self : Optional[Any] ) -> int:
'''simple docstring'''
with self.assertRaisesRegex(
A__ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
a__ : Tuple = AutoImageProcessor.from_pretrained(A__ , revision='''aaaaaa''' )
def __lowerCAmelCase ( self : str ) -> List[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
A__ , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
a__ : Union[str, Any] = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def __lowerCAmelCase ( self : List[Any] ) -> Tuple:
'''simple docstring'''
with self.assertRaises(A__ ):
a__ : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(A__ ):
a__ : Tuple = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=A__ )
a__ : Tuple = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=A__ )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(A__ )
a__ : str = AutoImageProcessor.from_pretrained(A__ , trust_remote_code=A__ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def __lowerCAmelCase ( self : List[Any] ) -> Dict:
'''simple docstring'''
try:
AutoConfig.register('''custom''' , A__ )
AutoImageProcessor.register(A__ , A__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(A__ ):
AutoImageProcessor.register(A__ , A__ )
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : Optional[int] = Path(A__ ) / '''preprocessor_config.json'''
a__ : List[str] = Path(A__ ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(A__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(A__ , '''w''' ) )
a__ : Tuple = CustomImageProcessor.from_pretrained(A__ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(A__ )
a__ : Tuple = AutoImageProcessor.from_pretrained(A__ )
self.assertIsInstance(A__ , A__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def __lowerCAmelCase ( self : List[Any] ) -> List[str]:
'''simple docstring'''
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCamelCase = True
try:
AutoConfig.register('''custom''' , A__ )
AutoImageProcessor.register(A__ , A__ )
# If remote code is not set, the default is to use local
a__ : Dict = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
a__ : Optional[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=A__ )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
a__ : Optional[int] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=A__ )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(A__ , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
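# The register/lookup machinery the last two tests exercise, boiled down to a
# plain dictionary (a sketch; the real mapping lives inside the auto classes):
class MyConfig:
    pass


class MyProcessor:
    pass


PROCESSOR_REGISTRY = {}


def register(config_cls, processor_cls):
    if config_cls in PROCESSOR_REGISTRY:
        raise ValueError(f"{config_cls.__name__} is already registered")
    PROCESSOR_REGISTRY[config_cls] = processor_cls


register(MyConfig, MyProcessor)
print(PROCESSOR_REGISTRY[MyConfig].__name__)  # MyProcessor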
| 688 | 0 |
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Great-circle distance in meters between two points, with latitudes
    corrected for the ellipsoidal flattening of the Earth (WGS-84 axes above)."""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Haversine equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
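# Example coordinates for the reconstructed function above: San Francisco to
# Yosemite comes out to roughly 254,352 meters.
SAN_FRANCISCO = (37.774856, -122.424227)
YOSEMITE = (37.864742, -119.537521)
# print(haversine_distance(*SAN_FRANCISCO, *YOSEMITE))  # ~254352 meters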
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 25 |
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
__SCREAMING_SNAKE_CASE = get_logger(__name__)
class lowerCAmelCase__ :
"""simple docstring"""
__UpperCamelCase = "dummy_data"
__UpperCamelCase = "datasets"
__UpperCamelCase = False
def __init__( self : Any , A__ : str , A__ : str , A__ : Union[Version, str] , A__ : Optional[str] = None , A__ : bool = False , A__ : bool = True , A__ : Optional[List[Callable]] = None , ) -> int:
'''simple docstring'''
a__ : Tuple = 0
a__ : Any = dataset_name
a__ : int = cache_dir
a__ : str = use_local_dummy_data
a__ : List[str] = config
# download_callbacks take a single url as input
a__ : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
a__ : str = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
a__ : Optional[Any] = str(A__ )
# to be downloaded
a__ : Tuple = None
a__ : Tuple = None
@property
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
if self._dummy_file is None:
a__ : Dict = self.download_dummy_data()
return self._dummy_file
@property
def __lowerCAmelCase ( self : Any ) -> Optional[int]:
'''simple docstring'''
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('''dummy''' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('''dummy''' , self.version_name )
@property
def __lowerCAmelCase ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' )
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
a__ : int = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
a__ : str = cached_path(
A__ , cache_dir=self.cache_dir , extract_compressed_file=A__ , force_extract=A__ )
return os.path.join(A__ , self.dummy_file_name )
@property
def __lowerCAmelCase ( self : int ) -> Optional[int]:
'''simple docstring'''
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if self._bucket_url is None:
a__ : int = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) )
return self._bucket_url
@property
def __lowerCAmelCase ( self : List[Any] ) -> Dict:
'''simple docstring'''
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] )
def __lowerCAmelCase ( self : Union[str, Any] , A__ : Optional[int] , *A__ : int ) -> Union[str, Any]:
'''simple docstring'''
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
a__ : Tuple = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
a__ : Union[str, Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(A__ , A__ ):
return self.create_dummy_data_dict(A__ , A__ )
elif isinstance(A__ , (list, tuple) ):
return self.create_dummy_data_list(A__ , A__ )
else:
return self.create_dummy_data_single(A__ , A__ )
def __lowerCAmelCase ( self : List[str] , A__ : Any , *A__ : int ) -> Any:
'''simple docstring'''
return self.download_and_extract(A__ )
def __lowerCAmelCase ( self : Any , A__ : Optional[int] , A__ : Optional[Any] ) -> int:
'''simple docstring'''
return self.download_and_extract(A__ )
def __lowerCAmelCase ( self : Union[str, Any] , A__ : int , *A__ : List[Any] , **A__ : str ) -> Optional[Any]:
'''simple docstring'''
return path
def __lowerCAmelCase ( self : List[Any] ) -> str:
'''simple docstring'''
return {}
def __lowerCAmelCase ( self : int , A__ : Union[str, Any] , A__ : List[str] ) -> Any:
'''simple docstring'''
a__ : int = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(A__ , A__ ):
for single_url in single_urls:
download_callback(A__ )
else:
a__ : Dict = single_urls
download_callback(A__ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(A__ , A__ ):
a__ : Optional[int] = [os.path.join(A__ , urllib.parse.quote_plus(Path(A__ ).name ) ) for x in single_urls]
else:
a__ : Optional[Any] = single_urls
a__ : Tuple = os.path.join(A__ , urllib.parse.quote_plus(Path(A__ ).name ) )
a__ : List[str] = value
# make sure that values are unique
if all(isinstance(A__ , A__ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
a__ : Optional[int] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __lowerCAmelCase ( self : Dict , A__ : str , A__ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
a__ : str = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
a__ : Union[str, Any] = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , A__ ) ) for url in data_url )
a__ : Optional[Any] = all(
url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
a__ : Dict = [data_url[0]] * len(A__ )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(A__ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
a__ : Optional[int] = os.path.join(A__ , urllib.parse.quote_plus(single_url.split('''/''' )[-1] ) )
dummy_data_list.append(A__ )
return dummy_data_list
def __lowerCAmelCase ( self : Dict , A__ : Dict , A__ : str ) -> Optional[int]:
'''simple docstring'''
for download_callback in self.download_callbacks:
download_callback(A__ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
a__ : Union[str, Any] = os.path.join(A__ , urllib.parse.quote_plus(data_url.split('''/''' )[-1] ) )
if os.path.exists(A__ ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __lowerCAmelCase ( self : int ) -> str:
'''simple docstring'''
pass
def __lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
pass
def __lowerCAmelCase ( self : Any , A__ : Tuple ) -> Any:
'''simple docstring'''
def _iter_archive_members(A__ : str ):
# this preserves the order of the members inside the ZIP archive
a__ : Dict = Path(self.dummy_file ).parent
a__ : Tuple = path.relative_to(A__ )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
a__ : Optional[Any] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(A__ )
a__ : str = Path(A__ )
a__ : Optional[Any] = _iter_archive_members(A__ ) if self.use_local_dummy_data else path.rglob('''*''' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ):
yield file_path.relative_to(A__ ).as_posix(), file_path.open('''rb''' )
def __lowerCAmelCase ( self : Tuple , A__ : Tuple ) -> Tuple:
'''simple docstring'''
if not isinstance(A__ , A__ ):
a__ : int = [paths]
for path in paths:
if os.path.isfile(A__ ):
if os.path.basename(A__ ).startswith(('''.''', '''__''') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(A__ ):
if os.path.basename(A__ ).startswith(('''.''', '''__''') ):
continue
dirnames.sort()
for filename in sorted(A__ ):
if filename.startswith(('''.''', '''__''') ):
continue
yield os.path.join(A__ , A__ )
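# The path mangling used throughout the manager above, in isolation: the last
# URL segment is percent-encoded with quote_plus so it is safe as a local
# file name even when the URL carries query arguments.
import os
import urllib.parse

url = "https://example.com/data/train.json?split=1"
print(os.path.join("dummy", urllib.parse.quote_plus(url.split("/")[-1])))
# dummy/train.json%3Fsplit%3D1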
| 688 | 0 |
"""simple docstring"""
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
a_ = logging.get_logger(__name__)
a_ = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
a_ = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
a_ = {
'jukebox': 5_1_2,
}
class UpperCAmelCase_ ( lowerCAmelCase_ ):
UpperCamelCase =VOCAB_FILES_NAMES
UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase =PRETRAINED_LYRIC_TOKENS_SIZES
UpperCamelCase =["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=["v3", "v2", "v2"] , UpperCamelCase_=5_12 , UpperCamelCase_=5 , UpperCamelCase_="<|endoftext|>" , **UpperCamelCase_ , ) -> Tuple:
__lowercase : Union[str, Any] = AddedToken(A__ , lstrip=A__ , rstrip=A__ ) if isinstance(A__ , A__ ) else unk_token
super().__init__(
unk_token=A__ , n_genres=A__ , version=A__ , max_n_lyric_tokens=A__ , **A__ , )
__lowercase : List[str] = version
__lowercase : Optional[Any] = max_n_lyric_tokens
__lowercase : Tuple = n_genres
with open(A__ , encoding='''utf-8''' ) as vocab_handle:
__lowercase : List[Any] = json.load(A__ )
with open(A__ , encoding='''utf-8''' ) as vocab_handle:
__lowercase : Any = json.load(A__ )
with open(A__ , encoding='''utf-8''' ) as vocab_handle:
__lowercase : Dict = json.load(A__ )
__lowercase : Tuple = r'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'''
        # In v2 the vocabulary had n_vocab=80; v3 dropped "+", so n_vocab=79 characters.
if len(self.lyrics_encoder ) == 79:
__lowercase : str = oov.replace(R'''\-\'''' , R'''\-+\'''' )
__lowercase : List[str] = regex.compile(A__ )
__lowercase : List[str] = {v: k for k, v in self.artists_encoder.items()}
__lowercase : Optional[Any] = {v: k for k, v in self.genres_encoder.items()}
__lowercase : Union[str, Any] = {v: k for k, v in self.lyrics_encoder.items()}
@property
def _lowerCamelCase ( self ) -> Tuple:
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def _lowerCamelCase ( self ) -> Optional[int]:
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Any:
__lowercase : Union[str, Any] = [self.artists_encoder.get(A__ , 0 ) for artist in list_artists]
for genres in range(len(A__ ) ):
__lowercase : Dict = [self.genres_encoder.get(A__ , 0 ) for genre in list_genres[genres]]
__lowercase : Union[str, Any] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
__lowercase : Dict = [[self.lyrics_encoder.get(A__ , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Optional[Any]:
return list(A__ )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ) -> Any:
__lowercase : Optional[int] = self.prepare_for_tokenization(A__ , A__ , A__ )
__lowercase : Any = self._tokenize(A__ )
return artist, genre, lyrics
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = False ) -> Tuple[str, str, str, Dict[str, Any]]:
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
__lowercase : Optional[Any] = artists[idx].lower()
__lowercase : Union[str, Any] = [genres[idx].lower()]
else:
__lowercase : str = self._normalize(artists[idx] ) + '''.v2'''
__lowercase : Union[str, Any] = [
self._normalize(A__ ) + '''.v2''' for genre in genres[idx].split('''_''' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
__lowercase : Any = regex.compile(R'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' )
__lowercase : str = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'''
__lowercase : List[Any] = {vocab[index]: index + 1 for index in range(len(A__ ) )}
__lowercase : int = 0
__lowercase : List[str] = len(A__ ) + 1
__lowercase : Any = self.vocab
__lowercase : List[str] = {v: k for k, v in self.vocab.items()}
__lowercase : Any = ''''''
else:
__lowercase : Any = regex.compile(R'''[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+''' )
__lowercase : Any = self._run_strip_accents(A__ )
__lowercase : Dict = lyrics.replace('''\\''' , '''\n''' )
__lowercase : Union[str, Any] = self.out_of_vocab.sub('''''' , A__ ), [], []
return artists, genres, lyrics
def _lowerCamelCase ( self , UpperCamelCase_ ) -> str:
__lowercase : Union[str, Any] = unicodedata.normalize('''NFD''' , A__ )
__lowercase : int = []
for char in text:
__lowercase : Union[str, Any] = unicodedata.category(A__ )
if cat == "Mn":
continue
output.append(A__ )
return "".join(A__ )
def _lowerCamelCase ( self , UpperCamelCase_ ) -> str:
__lowercase : List[Any] = (
[chr(A__ ) for i in range(ord('''a''' ) , ord('''z''' ) + 1 )]
+ [chr(A__ ) for i in range(ord('''A''' ) , ord('''Z''' ) + 1 )]
+ [chr(A__ ) for i in range(ord('''0''' ) , ord('''9''' ) + 1 )]
+ ['''.''']
)
__lowercase : Dict = frozenset(A__ )
__lowercase : Tuple = re.compile(R'''_+''' )
__lowercase : Optional[int] = ''''''.join([c if c in accepted else '''_''' for c in text.lower()] )
__lowercase : Any = pattern.sub('''_''' , A__ ).strip('''_''' )
return text
def _lowerCamelCase ( self , UpperCamelCase_ ) -> str:
return " ".join(A__ )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ) -> int:
if not isinstance(A__ , A__ ):
__lowercase : Optional[Any] = TensorType(A__ )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'''Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.''' )
import tensorflow as tf
__lowercase : Dict = tf.constant
__lowercase : Optional[Any] = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('''Unable to convert output to PyTorch tensors format, PyTorch is not installed.''' )
import torch
__lowercase : Optional[Any] = torch.tensor
__lowercase : int = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('''Unable to convert output to JAX tensors format, JAX is not installed.''' )
import jax.numpy as jnp # noqa: F811
__lowercase : str = jnp.array
__lowercase : str = _is_jax
else:
__lowercase : int = np.asarray
__lowercase : List[str] = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
__lowercase : int = [inputs]
if not is_tensor(A__ ):
__lowercase : str = as_tensor(A__ )
except: # noqa E722
raise ValueError(
'''Unable to create tensor, you should probably activate truncation and/or padding '''
'''with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.''' )
return inputs
def __call__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_="" , UpperCamelCase_="pt" ) -> BatchEncoding:
__lowercase : Dict = [0, 0, 0]
__lowercase : str = [artist] * len(self.version )
__lowercase : Optional[int] = [genres] * len(self.version )
__lowercase : Union[str, Any] = self.tokenize(A__ , A__ , A__ )
__lowercase : Any = self._convert_token_to_id(A__ , A__ , A__ )
__lowercase : str = [-INFINITY] * len(full_tokens[-1] )
__lowercase : Optional[Any] = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=A__ )
for i in range(len(self.version ) )
]
return BatchEncoding({'''input_ids''': input_ids, '''attention_masks''': attention_masks} )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> Tuple[str]:
if not os.path.isdir(A__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowercase : Optional[Any] = os.path.join(
A__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''artists_file'''] )
with open(A__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=A__ ) )
__lowercase : List[Any] = os.path.join(
A__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''genres_file'''] )
with open(A__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=A__ ) )
__lowercase : Optional[Any] = os.path.join(
A__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''lyrics_file'''] )
with open(A__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=A__ ) )
return (artists_file, genres_file, lyrics_file)
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Union[str, Any]:
__lowercase : Tuple = self.artists_decoder.get(A__ )
__lowercase : Any = [self.genres_decoder.get(A__ ) for genre in genres_index]
__lowercase : Any = [self.lyrics_decoder.get(A__ ) for character in lyric_index]
return artist, genres, lyrics
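# The accent-stripping step from the tokenizer above, usable on its own:
# NFD-decompose the text, then drop the combining marks (category "Mn").
import unicodedata


def strip_accents(text: str) -> str:
    return "".join(
        ch for ch in unicodedata.normalize("NFD", text) if unicodedata.category(ch) != "Mn"
    )


print(strip_accents("Beyoncé"))  # Beyonce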
| 76 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase__ ( lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = LxmertTokenizer
__UpperCamelCase = LxmertTokenizerFast
__UpperCamelCase = True
__UpperCamelCase = True
def __lowerCAmelCase ( self : str ) -> str:
'''simple docstring'''
super().setUp()
a__ : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
a__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __lowerCAmelCase ( self : int , A__ : int ) -> int:
'''simple docstring'''
a__ : List[Any] = '''UNwant\u00E9d,running'''
a__ : Optional[int] = '''unwanted, running'''
return input_text, output_text
def __lowerCAmelCase ( self : int ) -> Dict:
'''simple docstring'''
a__ : Optional[int] = self.tokenizer_class(self.vocab_file )
a__ : List[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(A__ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , [7, 4, 5, 1_0, 8, 9] )
def __lowerCAmelCase ( self : Any ) -> Dict:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a__ : Union[str, Any] = self.get_tokenizer()
a__ : Union[str, Any] = self.get_rust_tokenizer()
a__ : str = '''I was born in 92000, and this is falsé.'''
a__ : Tuple = tokenizer.tokenize(A__ )
a__ : Tuple = rust_tokenizer.tokenize(A__ )
self.assertListEqual(A__ , A__ )
a__ : Optional[int] = tokenizer.encode(A__ , add_special_tokens=A__ )
a__ : Optional[Any] = rust_tokenizer.encode(A__ , add_special_tokens=A__ )
self.assertListEqual(A__ , A__ )
a__ : List[str] = self.get_rust_tokenizer()
a__ : str = tokenizer.encode(A__ )
a__ : int = rust_tokenizer.encode(A__ )
self.assertListEqual(A__ , A__ )
| 688 | 0 |
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
a : str = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 48_000,
'''sample_size''': 65_536,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 48_000,
'''sample_size''': 65_536,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 48_000,
'''sample_size''': 131_072,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 16_000,
'''sample_size''': 65_536,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 16_000,
'''sample_size''': 65_536,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 16_000,
'''sample_size''': 65_536,
},
}
def alpha_sigma_to_t(alpha, sigma):
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
class SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ ):
pass
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__( self : Union[str, Any] , a_ : Union[str, Any] ):
"""simple docstring"""
super().__init__()
__snake_case = DiffusionAttnUnetaD(A__ , n_attn_layers=4 )
__snake_case = deepcopy(self.diffusion )
__snake_case = torch.quasirandom.SobolEngine(1 , scramble=A__ )
def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(F'''wget {url} ./''')
    return F'''./{model_name}.ckpt'''
a : str = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
a : Tuple = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
a : Optional[Any] = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
a : Optional[int] = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
a : Any = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
a : Any = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])
    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(F'''ResConvBlock error with {name}''')
    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(F'''Attn error with {name}''')
def __UpperCAmelCase ( _UpperCAmelCase : List[str] , _UpperCAmelCase : str=13 ) -> Any:
__snake_case = input_string
if string.split("." )[0] == "timestep_embed":
return string.replace("timestep_embed" , "time_proj" )
__snake_case = 0
if string.startswith("net.3." ):
depth += 1
__snake_case = string[6:]
elif string.startswith("net." ):
__snake_case = string[4:]
while string.startswith("main.7." ):
depth += 1
__snake_case = string[7:]
if string.startswith("main." ):
__snake_case = string[5:]
# mid block
if string[:2].isdigit():
__snake_case = string[:2]
__snake_case = string[2:]
else:
__snake_case = string[0]
__snake_case = string[1:]
if depth == max_depth:
__snake_case = MID_NUM_TO_LAYER[layer_num]
__snake_case = '''mid_block'''
elif depth > 0 and int(lowerCAmelCase__ ) < 7:
__snake_case = DOWN_NUM_TO_LAYER[layer_num]
__snake_case = F'''down_blocks.{depth}'''
elif depth > 0 and int(lowerCAmelCase__ ) > 7:
__snake_case = UP_NUM_TO_LAYER[layer_num]
__snake_case = F'''up_blocks.{max_depth - depth - 1}'''
elif depth == 0:
__snake_case = DEPTH_0_TO_LAYER[layer_num]
__snake_case = F'''up_blocks.{max_depth - 1}''' if int(lowerCAmelCase__ ) > 3 else '''down_blocks.0'''
if not string_left.startswith("." ):
raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' )
__snake_case = string_left[1:]
if "resnets" in new_layer:
__snake_case = convert_resconv_naming(lowerCAmelCase__ )
elif "attentions" in new_layer:
__snake_case = convert_attn_naming(lowerCAmelCase__ )
__snake_case = new_string_left
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__snake_case = prefix + '''.''' + new_layer + '''.''' + string_left
else:
__snake_case = [prefix + '''.''' + new_layer + '''.''' + s for s in string_left]
return new_string
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] ) -> Any:
__snake_case = {}
for k, v in state_dict.items():
if k.endswith("kernel" ):
            # up- and downsample layers don't have trainable weights
continue
__snake_case = rename(lowerCAmelCase__ )
# check if we need to transform from Conv => Linear for attention
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__snake_case = transform_conv_attns(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else:
__snake_case = v
return new_state_dict
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Dict ) -> Optional[Any]:
if len(lowerCAmelCase__ ) == 1:
if len(v.shape ) == 3:
# weight
__snake_case = v[:, :, 0]
else:
# bias
__snake_case = v
else:
# qkv matrices
__snake_case = v.shape[0]
__snake_case = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
__snake_case = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
__snake_case = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def __UpperCAmelCase ( _UpperCAmelCase : Tuple ) -> Optional[int]:
__snake_case = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
__snake_case = args.model_path.split("/" )[-1].split("." )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}'''
__snake_case = download(lowerCAmelCase__ )
__snake_case = MODELS_MAP[model_name]['''sample_rate''']
__snake_case = MODELS_MAP[model_name]['''sample_size''']
__snake_case = Object()
__snake_case = sample_size
__snake_case = sample_rate
__snake_case = 0
__snake_case = UNetaDModel(sample_size=lowerCAmelCase__ , sample_rate=lowerCAmelCase__ )
__snake_case = diffusers_model.state_dict()
__snake_case = DiffusionUncond(lowerCAmelCase__ )
orig_model.load_state_dict(torch.load(args.model_path , map_location=lowerCAmelCase__ )["state_dict"] )
__snake_case = orig_model.diffusion_ema.eval()
__snake_case = orig_model.state_dict()
__snake_case = rename_orig_weights(lowerCAmelCase__ )
__snake_case = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
__snake_case = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(lowerCAmelCase__ ) == 0, F'''Problem with {renamed_minus_diffusers}'''
assert all(k.endswith("kernel" ) for k in list(lowerCAmelCase__ ) ), F'''Problem with {diffusers_minus_renamed}'''
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'''
if key == "time_proj.weight":
__snake_case = value.squeeze()
__snake_case = value
diffusers_model.load_state_dict(lowerCAmelCase__ )
__snake_case = 1_00
__snake_case = 33
__snake_case = IPNDMScheduler(num_train_timesteps=lowerCAmelCase__ )
__snake_case = torch.manual_seed(lowerCAmelCase__ )
__snake_case = torch.randn([1, 2, config.sample_size] , generator=lowerCAmelCase__ ).to(lowerCAmelCase__ )
__snake_case = torch.linspace(1 , 0 , steps + 1 , device=lowerCAmelCase__ )[:-1]
__snake_case = get_crash_schedule(lowerCAmelCase__ )
__snake_case = DanceDiffusionPipeline(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
__snake_case = torch.manual_seed(33 )
__snake_case = pipe(num_inference_steps=lowerCAmelCase__ , generator=lowerCAmelCase__ ).audios
__snake_case = sampling.iplms_sample(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , {} )
__snake_case = generated.clamp(-1 , 1 )
__snake_case = (generated - audio).abs().sum()
__snake_case = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print("Diff sum" , lowerCAmelCase__ )
print("Diff max" , lowerCAmelCase__ )
assert diff_max < 1E-3, F'''Diff max: {diff_max} is too much :-/'''
print(F'''Conversion for {model_name} successful!''' )
if __name__ == "__main__":
a : Dict = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
a : Dict = parser.parse_args()
main(args)
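# The fused-attention split performed by transform_conv_attns, shown on a toy
# tensor: a stacked [3 * d, in_dim] qkv weight is cut into three [d, in_dim]
# matrices for query, key and value.
import torch

d, in_dim = 2, 5
fused = torch.randn(3 * d, in_dim)
q, k, v = (fused[i * d : (i + 1) * d] for i in range(3))
print(q.shape, k.shape, v.shape)  # torch.Size([2, 5]) three times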
| 69 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def __a ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : str ):
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
a__ : Dict = TapasConfig.from_json_file(lowerCAmelCase__ )
# set absolute/relative position embeddings parameter
a__ : List[Any] = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
a__ : Optional[Any] = TapasForQuestionAnswering(config=lowerCAmelCase__ )
elif task == "WTQ":
# run_task_main.py hparams
a__ : List[str] = 4
a__ : Optional[int] = True
# hparam_utils.py hparams
a__ : List[Any] = 0.664694
a__ : List[Any] = 0.207951
a__ : Union[str, Any] = 0.121194
a__ : Optional[Any] = True
a__ : Optional[int] = True
a__ : List[str] = False
a__ : Union[str, Any] = 0.0352513
a__ : Any = TapasForQuestionAnswering(config=lowerCAmelCase__ )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
a__ : Tuple = 4
a__ : Dict = False
# hparam_utils.py hparams
a__ : str = 36.4519
a__ : str = 0.903421
a__ : Optional[Any] = 222.088
a__ : Dict = True
a__ : Dict = True
a__ : Dict = True
a__ : str = 0.763141
a__ : List[Any] = TapasForQuestionAnswering(config=lowerCAmelCase__ )
elif task == "TABFACT":
a__ : List[str] = TapasForSequenceClassification(config=lowerCAmelCase__ )
elif task == "MLM":
a__ : Tuple = TapasForMaskedLM(config=lowerCAmelCase__ )
elif task == "INTERMEDIATE_PRETRAINING":
a__ : List[str] = TapasModel(config=lowerCAmelCase__ )
else:
raise ValueError(F'Task {task} not supported.' )
print(F'Building PyTorch model from configuration: {config}' )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save pytorch-model (weights and configuration)
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(lowerCAmelCase__ )
# Save tokenizer files
print(F'Save tokenizer files to {pytorch_dump_path}' )
a__ : Optional[Any] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + '''vocab.txt''' , model_max_length=512 )
tokenizer.save_pretrained(lowerCAmelCase__ )
print('''Used relative position embeddings:''' , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
help='Whether to use relative position embeddings or not. Defaults to True.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
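# The task dispatch of the converter, condensed into a table (a sketch; the
# question-answering tasks differ only in the hyperparameters set above):
TASK_TO_HEAD = {
    "SQA": "TapasForQuestionAnswering",
    "WTQ": "TapasForQuestionAnswering",
    "WIKISQL_SUPERVISED": "TapasForQuestionAnswering",
    "TABFACT": "TapasForSequenceClassification",
    "MLM": "TapasForMaskedLM",
    "INTERMEDIATE_PRETRAINING": "TapasModel",
}
print(TASK_TO_HEAD["WTQ"])  # TapasForQuestionAnswering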
| 688 | 0 |
def catalan(number: int) -> int:
    """Return the number-th Catalan number (1-indexed: catalan(1) == 1)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
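# First few values of the rewritten function above; catalan(n) is the
# (n - 1)-th Catalan number in the usual 0-indexed convention.
print([catalan(n) for n in range(1, 7)])  # [1, 1, 2, 5, 14, 42]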
| 604 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__SCREAMING_SNAKE_CASE = {
'vocab_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
},
'tokenizer_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
},
}
__SCREAMING_SNAKE_CASE = {
'google/fnet-base': 5_1_2,
'google/fnet-large': 5_1_2,
}
__SCREAMING_SNAKE_CASE = '▁'
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ["input_ids", "token_type_ids"]
__UpperCamelCase = FNetTokenizer
def __init__( self : Any , A__ : Any=None , A__ : int=None , A__ : List[str]=False , A__ : int=True , A__ : str=True , A__ : List[Any]="<unk>" , A__ : Dict="[SEP]" , A__ : List[str]="<pad>" , A__ : Union[str, Any]="[CLS]" , A__ : Dict="[MASK]" , **A__ : Tuple , ) -> List[str]:
'''simple docstring'''
a__ : Optional[int] = (
AddedToken(A__ , lstrip=A__ , rstrip=A__ , normalized=A__ )
if isinstance(A__ , A__ )
else mask_token
)
super().__init__(
A__ , tokenizer_file=A__ , do_lower_case=A__ , remove_space=A__ , keep_accents=A__ , unk_token=A__ , sep_token=A__ , pad_token=A__ , cls_token=A__ , mask_token=A__ , **A__ , )
a__ : Optional[Any] = do_lower_case
a__ : Dict = remove_space
a__ : List[Any] = keep_accents
a__ : Optional[Any] = vocab_file
a__ : Any = False if not self.vocab_file else True
def __lowerCAmelCase ( self : str , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
a__ : Optional[int] = [self.sep_token_id]
a__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __lowerCAmelCase ( self : List[Any] , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
a__ : Dict = [self.sep_token_id]
a__ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCAmelCase ( self : Tuple , A__ : str , A__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(A__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
a__ : Union[str, Any] = os.path.join(
A__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A__ ):
copyfile(self.vocab_file , A__ )
return (out_vocab_file,)
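# Sentence-pair layout produced by the two helpers above, with toy ids
# (101 and 102 standing in for [CLS] and [SEP]; real ids come from the vocab):
cls, sep = [101], [102]
a, b = [7, 8], [9]
print(cls + a + sep + b + sep)                        # [101, 7, 8, 102, 9, 102]
print(len(cls + a + sep) * [0] + len(b + sep) * [1])  # [0, 0, 0, 0, 1, 1]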
| 688 | 0 |
def solution(max_perimeter: int = 10**9) -> int:
    """Sum of the perimeters (up to max_perimeter) of all almost equilateral
    triangles with integral side lengths and integral area (Project Euler 94)."""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(F"""{solution() = }""")
| 652 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__SCREAMING_SNAKE_CASE = {
'vocab_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-german-cased': (
'https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'
),
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'
),
},
}
__SCREAMING_SNAKE_CASE = {
'distilbert-base-uncased': 5_1_2,
'distilbert-base-uncased-distilled-squad': 5_1_2,
'distilbert-base-cased': 5_1_2,
'distilbert-base-cased-distilled-squad': 5_1_2,
'distilbert-base-german-cased': 5_1_2,
'distilbert-base-multilingual-cased': 5_1_2,
}
__SCREAMING_SNAKE_CASE = {
'distilbert-base-uncased': {'do_lower_case': True},
'distilbert-base-uncased-distilled-squad': {'do_lower_case': True},
'distilbert-base-cased': {'do_lower_case': False},
'distilbert-base-cased-distilled-squad': {'do_lower_case': False},
'distilbert-base-german-cased': {'do_lower_case': False},
'distilbert-base-multilingual-cased': {'do_lower_case': False},
}
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase = ["input_ids", "attention_mask"]
__UpperCamelCase = DistilBertTokenizer
def __init__( self : str , A__ : Optional[Any]=None , A__ : Any=None , A__ : Tuple=True , A__ : List[Any]="[UNK]" , A__ : List[str]="[SEP]" , A__ : Tuple="[PAD]" , A__ : Optional[int]="[CLS]" , A__ : Union[str, Any]="[MASK]" , A__ : List[str]=True , A__ : Any=None , **A__ : int , ) -> str:
'''simple docstring'''
super().__init__(
A__ , tokenizer_file=A__ , do_lower_case=A__ , unk_token=A__ , sep_token=A__ , pad_token=A__ , cls_token=A__ , mask_token=A__ , tokenize_chinese_chars=A__ , strip_accents=A__ , **A__ , )
a__ : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , A__ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , A__ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , A__ ) != tokenize_chinese_chars
):
a__ : int = getattr(A__ , normalizer_state.pop('''type''' ) )
a__ : List[Any] = do_lower_case
a__ : str = strip_accents
a__ : List[str] = tokenize_chinese_chars
a__ : Dict = normalizer_class(**A__ )
a__ : List[Any] = do_lower_case
def __lowerCAmelCase ( self : Tuple , A__ : List[str] , A__ : Dict=None ) -> List[str]:
'''simple docstring'''
a__ : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCAmelCase ( self : int , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
a__ : List[str] = [self.sep_token_id]
a__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCAmelCase ( self : str , A__ : str , A__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
a__ : int = self._tokenizer.model.save(A__ , name=A__ )
return tuple(A__ )
| 688 | 0 |
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
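# Worked example of the rolling-hash update above (illustration only, using the same
# alphabet_size = 256 and ignoring the modulus, which does not change the result here):
# hash("ab") = ord("a") * 256 + ord("b") = 97 * 256 + 98 = 24930.
# Sliding one position in the text "xab": start from hash("xa") = 120 * 256 + 97 = 30817,
# drop "x" and append "b": (30817 - 120 * 256) * 256 + 98 = 24930 = hash("ab").
# The window hash is therefore updated in O(1) instead of being recomputed from scratch.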
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
| 375 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost


class AStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # each search aims at the newest frontier node of the opposite search
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 688 | 0 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class snake_case__ :
lowercase__ : List[str] = MBartConfig
lowercase__ : Optional[Any] = {}
lowercase__ : Tuple = '''gelu'''
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=99 , lowerCAmelCase__=32 , lowerCAmelCase__=2 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=20 , lowerCAmelCase__=2 , lowerCAmelCase__=1 , lowerCAmelCase__=0 , ) -> Dict:
__magic_name__ : Dict = parent
__magic_name__ : Any = batch_size
__magic_name__ : Any = seq_length
__magic_name__ : Any = is_training
__magic_name__ : Optional[int] = use_labels
__magic_name__ : Dict = vocab_size
__magic_name__ : Optional[int] = hidden_size
__magic_name__ : Any = num_hidden_layers
__magic_name__ : Optional[int] = num_attention_heads
__magic_name__ : List[Any] = intermediate_size
__magic_name__ : Optional[Any] = hidden_dropout_prob
__magic_name__ : List[Any] = attention_probs_dropout_prob
__magic_name__ : Union[str, Any] = max_position_embeddings
__magic_name__ : Any = eos_token_id
__magic_name__ : str = pad_token_id
__magic_name__ : List[str] = bos_token_id
def __magic_name__ ( self ) -> str:
__magic_name__ : List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__magic_name__ : Dict = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__magic_name__ : List[str] = tf.concat([input_ids, eos_tensor] , axis=1 )
__magic_name__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ : Optional[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__magic_name__ : str = prepare_mbart_inputs_dict(A__ , A__ , A__ )
return config, inputs_dict
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
__magic_name__ : str = TFMBartModel(config=A__ ).get_decoder()
__magic_name__ : Dict = inputs_dict['''input_ids''']
__magic_name__ : List[str] = input_ids[:1, :]
__magic_name__ : Dict = inputs_dict['''attention_mask'''][:1, :]
__magic_name__ : Any = inputs_dict['''head_mask''']
__magic_name__ : List[str] = 1
# first forward pass
__magic_name__ : Optional[Any] = model(A__ , attention_mask=A__ , head_mask=A__ , use_cache=A__ )
__magic_name__ : str = outputs.to_tuple()
__magic_name__ : Optional[int] = past_key_values[1]
def UpperCamelCase ( _A, _A, _A, _A=None, _A=None, _A=None, _A=None, _A=None, ):
"""simple docstring"""
if attention_mask is None:
__magic_name__ : Optional[Any] = tf.cast(tf.math.not_equal(lowerCAmelCase__, config.pad_token_id ), tf.inta )
if decoder_attention_mask is None:
__magic_name__ : Any = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.inta ),
], axis=-1, )
if head_mask is None:
__magic_name__ : Optional[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__magic_name__ : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__magic_name__ : List[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class snake_case__ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
lowercase__ : Tuple = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
lowercase__ : Any = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
lowercase__ : Optional[int] = (
{
'''conversational''': TFMBartForConditionalGeneration,
'''feature-extraction''': TFMBartModel,
'''summarization''': TFMBartForConditionalGeneration,
'''text2text-generation''': TFMBartForConditionalGeneration,
'''translation''': TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowercase__ : Any = True
lowercase__ : List[Any] = False
lowercase__ : List[Any] = False
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def __magic_name__ ( self ) -> int:
__magic_name__ : Union[str, Any] = TFMBartModelTester(self )
__magic_name__ : List[Any] = ConfigTester(self , config_class=A__ )
def __magic_name__ ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*A__ )
@require_sentencepiece
@require_tokenizers
@require_tf
class snake_case__ ( unittest.TestCase ):
lowercase__ : int = [
''' UN Chief Says There Is No Military Solution in Syria''',
]
lowercase__ : Tuple = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
]
lowercase__ : int = '''facebook/mbart-large-en-ro'''
@cached_property
def __magic_name__ ( self ) -> List[Any]:
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __magic_name__ ( self ) -> str:
__magic_name__ : Dict = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def __magic_name__ ( self , **lowerCAmelCase__ ) -> Tuple:
__magic_name__ : Dict = self.translate_src_text(**A__ )
self.assertListEqual(self.expected_text , A__ )
def __magic_name__ ( self , **lowerCAmelCase__ ) -> Optional[int]:
__magic_name__ : Optional[int] = self.tokenizer(self.src_text , **A__ , return_tensors="""tf""" )
__magic_name__ : List[Any] = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
__magic_name__ : List[str] = self.tokenizer.batch_decode(A__ , skip_special_tokens=A__ )
return generated_words
@slow
def __magic_name__ ( self ) -> List[str]:
self._assert_generated_batch_equal_expected()
| 324 |
'''simple docstring'''
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
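# Example invocation (a sketch; the script filename and all paths below are
# hypothetical placeholders, not taken from this file):
# python convert_gpt2_checkpoint.py \
#     --gpt2_checkpoint_path /path/to/tf/model.ckpt \
#     --pytorch_dump_folder_path /path/to/output \
#     --gpt2_config_file /path/to/gpt2/config.json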
| 688 | 0 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class __magic_name__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : int ) -> str:
'''simple docstring'''
UpperCAmelCase = inspect.getfile(accelerate.test_utils )
UpperCAmelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
UpperCAmelCase = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase = F'\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n '.split()
UpperCAmelCase = [sys.executable] + distributed_args
execute_subprocess_async(A__ , env=os.environ.copy() )
| 323 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel


do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                # swap the leading component of the parameter name per the mapping above
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 688 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
lowercase_ = logging.get_logger(__name__)
lowercase_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase_ = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
lowercase_ = {
'''distilbert-base-uncased''': 5_12,
'''distilbert-base-uncased-distilled-squad''': 5_12,
'''distilbert-base-cased''': 5_12,
'''distilbert-base-cased-distilled-squad''': 5_12,
'''distilbert-base-german-cased''': 5_12,
'''distilbert-base-multilingual-cased''': 5_12,
}
lowercase_ = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class __a ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE = DistilBertTokenizer
def __init__( self : str , snake_case_ : Optional[Any]=None , snake_case_ : Any=None , snake_case_ : Tuple=True , snake_case_ : List[Any]="[UNK]" , snake_case_ : List[str]="[SEP]" , snake_case_ : Tuple="[PAD]" , snake_case_ : Optional[int]="[CLS]" , snake_case_ : Union[str, Any]="[MASK]" , snake_case_ : List[str]=True , snake_case_ : Any=None , **snake_case_ : int , )-> str:
super().__init__(
A__ , tokenizer_file=A__ , do_lower_case=A__ , unk_token=A__ , sep_token=A__ , pad_token=A__ , cls_token=A__ , mask_token=A__ , tokenize_chinese_chars=A__ , strip_accents=A__ , **A__ , )
__lowerCAmelCase =json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get("""lowercase""" , A__) != do_lower_case
or normalizer_state.get("""strip_accents""" , A__) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , A__) != tokenize_chinese_chars
):
__lowerCAmelCase =getattr(A__ , normalizer_state.pop("""type"""))
__lowerCAmelCase =do_lower_case
__lowerCAmelCase =strip_accents
__lowerCAmelCase =tokenize_chinese_chars
__lowerCAmelCase =normalizer_class(**A__)
__lowerCAmelCase =do_lower_case
def UpperCamelCase ( self : Tuple , snake_case_ : List[str] , snake_case_ : Dict=None)-> List[str]:
__lowerCAmelCase =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase ( self : int , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None)-> List[int]:
__lowerCAmelCase =[self.sep_token_id]
__lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def UpperCamelCase ( self : str , snake_case_ : str , snake_case_ : Optional[str] = None)-> Tuple[str]:
__lowerCAmelCase =self._tokenizer.model.save(A__ , name=A__)
return tuple(A__)
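# A minimal usage sketch (an assumption: this mirrors the standard `transformers`
# fast-tokenizer API, and running it would download the pretrained files listed above):
# from transformers import DistilBertTokenizerFast
# tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
# enc = tokenizer("first segment", "second segment")
# print(enc["input_ids"])  # [CLS] ... [SEP] ... [SEP], per build_inputs_with_special_tokens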
| 354 |
'''simple docstring'''
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCamelCase = (KDPMaDiscreteScheduler,)
__UpperCamelCase = 10
def __lowerCAmelCase ( self : Optional[Any] , **A__ : Optional[int] ) -> int:
'''simple docstring'''
a__ : Optional[int] = {
'''num_train_timesteps''': 1_1_0_0,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**A__ )
return config
def __lowerCAmelCase ( self : List[Any] ) -> str:
'''simple docstring'''
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=A__ )
def __lowerCAmelCase ( self : List[str] ) -> List[str]:
'''simple docstring'''
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=A__ , beta_end=A__ )
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=A__ )
def __lowerCAmelCase ( self : str ) -> List[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A__ )
def __lowerCAmelCase ( self : str ) -> Optional[int]:
'''simple docstring'''
a__ : Any = self.scheduler_classes[0]
a__ : str = self.get_scheduler_config(prediction_type='''v_prediction''' )
a__ : Dict = scheduler_class(**A__ )
scheduler.set_timesteps(self.num_inference_steps )
a__ : Tuple = self.dummy_model()
a__ : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
a__ : Dict = sample.to(A__ )
for i, t in enumerate(scheduler.timesteps ):
a__ : Optional[Any] = scheduler.scale_model_input(A__ , A__ )
a__ : Union[str, Any] = model(A__ , A__ )
a__ : List[str] = scheduler.step(A__ , A__ , A__ )
a__ : Optional[Any] = output.prev_sample
a__ : Tuple = torch.sum(torch.abs(A__ ) )
a__ : Optional[int] = torch.mean(torch.abs(A__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6934E-07 ) < 1E-2
assert abs(result_mean.item() - 6.1112E-10 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.693_4286_5017_0972E-07 ) < 1E-2
assert abs(result_mean.item() - 0.0_002 ) < 1E-3
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
if torch_device == "mps":
return
a__ : List[Any] = self.scheduler_classes[0]
a__ : Tuple = self.get_scheduler_config()
a__ : Tuple = scheduler_class(**A__ )
scheduler.set_timesteps(self.num_inference_steps )
a__ : List[Any] = self.dummy_model()
a__ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
a__ : Any = sample.to(A__ )
for i, t in enumerate(scheduler.timesteps ):
a__ : str = scheduler.scale_model_input(A__ , A__ )
a__ : List[str] = model(A__ , A__ )
a__ : str = scheduler.step(A__ , A__ , A__ )
a__ : List[Any] = output.prev_sample
a__ : Dict = torch.sum(torch.abs(A__ ) )
a__ : Optional[Any] = torch.mean(torch.abs(A__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
def __lowerCAmelCase ( self : str ) -> int:
'''simple docstring'''
if torch_device == "mps":
return
a__ : Optional[int] = self.scheduler_classes[0]
a__ : Tuple = self.get_scheduler_config()
a__ : List[Any] = scheduler_class(**A__ )
scheduler.set_timesteps(self.num_inference_steps , device=A__ )
a__ : Union[str, Any] = self.dummy_model()
a__ : List[Any] = self.dummy_sample_deter.to(A__ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
a__ : Optional[int] = scheduler.scale_model_input(A__ , A__ )
a__ : List[Any] = model(A__ , A__ )
a__ : Any = scheduler.step(A__ , A__ , A__ )
a__ : List[str] = output.prev_sample
a__ : Any = torch.sum(torch.abs(A__ ) )
a__ : Union[str, Any] = torch.mean(torch.abs(A__ ) )
if str(A__ ).startswith('''cpu''' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
| 688 | 0 |
"""simple docstring"""
def decimal_isolate(number: float, digit_amount: int) -> float:
    # Isolate the decimal part of a number, rounded to digit_amount digits when positive.
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
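# Worked example: decimal_isolate(35.345, 1) computes round(35.345 - 35, 1) = 0.3,
# while decimal_isolate(35.345, 0) falls through to 35.345 - 35, which is approximately
# 0.345 (up to binary floating-point representation).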
if __name__ == "__main__":
print(decimal_isolate(1.5_3, 0))
print(decimal_isolate(3_5.3_4_5, 1))
print(decimal_isolate(3_5.3_4_5, 2))
print(decimal_isolate(3_5.3_4_5, 3))
print(decimal_isolate(-1_4.7_8_9, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-1_4.1_2_3, 1))
print(decimal_isolate(-1_4.1_2_3, 2))
print(decimal_isolate(-1_4.1_2_3, 3))
| 572 |
'''simple docstring'''
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
a__ : str = ['''a''', '''b''', '''c''']
# Defaults to last layer if both are None
a__ , a__ : List[Any] = get_aligned_output_features_output_indices(A__ , A__ , A__ )
self.assertEqual(A__ , ['''c'''] )
self.assertEqual(A__ , [2] )
# Out indices set to match out features
a__ , a__ : Optional[int] = get_aligned_output_features_output_indices(['''a''', '''c'''] , A__ , A__ )
self.assertEqual(A__ , ['''a''', '''c'''] )
self.assertEqual(A__ , [0, 2] )
# Out features set to match out indices
a__ , a__ : int = get_aligned_output_features_output_indices(A__ , [0, 2] , A__ )
self.assertEqual(A__ , ['''a''', '''c'''] )
self.assertEqual(A__ , [0, 2] )
# Out features selected from negative indices
a__ , a__ : List[str] = get_aligned_output_features_output_indices(A__ , [-3, -1] , A__ )
self.assertEqual(A__ , ['''a''', '''c'''] )
self.assertEqual(A__ , [-3, -1] )
def __lowerCAmelCase ( self : str ) -> List[Any]:
'''simple docstring'''
with self.assertRaises(A__ ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , A__ )
# Out features must be a list
with self.assertRaises(A__ ):
verify_out_features_out_indices(('''a''', '''b''') , (0, 1) , ['''a''', '''b'''] )
# Out features must be a subset of stage names
with self.assertRaises(A__ ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , ['''a'''] )
# Out indices must be a list or tuple
with self.assertRaises(A__ ):
verify_out_features_out_indices(A__ , 0 , ['''a''', '''b'''] )
# Out indices must be a subset of stage names
with self.assertRaises(A__ ):
verify_out_features_out_indices(A__ , (0, 1) , ['''a'''] )
# Out features and out indices must be the same length
with self.assertRaises(A__ ):
verify_out_features_out_indices(['''a''', '''b'''] , (0,) , ['''a''', '''b''', '''c'''] )
# Out features should match out indices
with self.assertRaises(A__ ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 2) , ['''a''', '''b''', '''c'''] )
# Out features and out indices should be in order
with self.assertRaises(A__ ):
verify_out_features_out_indices(['''b''', '''a'''] , (0, 1) , ['''a''', '''b'''] )
# Check passes with valid inputs
verify_out_features_out_indices(['''a''', '''b''', '''d'''] , (0, 1, -1) , ['''a''', '''b''', '''c''', '''d'''] )
def __lowerCAmelCase ( self : Dict ) -> int:
'''simple docstring'''
a__ : Optional[Any] = BackboneMixin()
a__ : int = ['''a''', '''b''', '''c''']
a__ : List[Any] = ['''a''', '''c''']
a__ : Tuple = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ['''a''', '''c'''] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
a__ : Dict = ['''a''', '''b''']
self.assertEqual(backbone.out_features , ['''a''', '''b'''] )
self.assertEqual(backbone.out_indices , [0, 1] )
a__ : int = [-3, -1]
self.assertEqual(backbone.out_features , ['''a''', '''c'''] )
self.assertEqual(backbone.out_indices , [-3, -1] )
| 688 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Optional[Any] = logging.get_logger(__name__)
a__ : List[str] = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class __snake_case ( lowerCAmelCase_ ):
__lowerCAmelCase = '''mgp-str'''
def __init__( self , UpperCamelCase_=[32, 128] , UpperCamelCase_=4 , UpperCamelCase_=3 , UpperCamelCase_=27 , UpperCamelCase_=38 , UpperCamelCase_=5_0257 , UpperCamelCase_=3_0522 , UpperCamelCase_=768 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=4.0 , UpperCamelCase_=True , UpperCamelCase_=False , UpperCamelCase_=1E-5 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=False , UpperCamelCase_=0.0_2 , **UpperCamelCase_ , ) -> int:
super().__init__(**A__ )
snake_case__ = image_size
snake_case__ = patch_size
snake_case__ = num_channels
snake_case__ = max_token_length
snake_case__ = num_character_labels
snake_case__ = num_bpe_labels
snake_case__ = num_wordpiece_labels
snake_case__ = hidden_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = mlp_ratio
snake_case__ = distilled
snake_case__ = layer_norm_eps
snake_case__ = drop_rate
snake_case__ = qkv_bias
snake_case__ = attn_drop_rate
snake_case__ = drop_path_rate
snake_case__ = output_aa_attentions
snake_case__ = initializer_range
| 368 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[key] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 688 | 0 |
import inspect
import unittest
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self : Dict ) -> Dict:
"""simple docstring"""
try:
import diffusers # noqa: F401
except ImportError:
assert False
def __UpperCamelCase ( self : int ) -> str:
"""simple docstring"""
import diffusers
from diffusers.dependency_versions_table import deps
SCREAMING_SNAKE_CASE : Optional[int] = inspect.getmembers(A__ , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
SCREAMING_SNAKE_CASE : int = '''k-diffusion'''
elif backend == "invisible_watermark":
SCREAMING_SNAKE_CASE : int = '''invisible-watermark'''
                    assert backend in deps, F"{backend} is not in the deps table!"
 | 25 |
'''simple docstring'''
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
__SCREAMING_SNAKE_CASE = 4
__SCREAMING_SNAKE_CASE = 3
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
pass
def __a ( lowerCAmelCase__ : List[str] ):
for shard in shards:
for i in range(lowerCAmelCase__ ):
yield {"i": i, "shard": shard}
def __a ( ):
a__ : str = int(os.environ['''RANK'''] )
a__ : int = int(os.environ['''WORLD_SIZE'''] )
a__ : str = ArgumentParser()
parser.add_argument('''--streaming''' , type=lowerCAmelCase__ )
parser.add_argument('''--local_rank''' , type=lowerCAmelCase__ )
parser.add_argument('''--num_workers''' , type=lowerCAmelCase__ , default=0 )
a__ : int = parser.parse_args()
a__ : List[str] = args.streaming
a__ : Dict = args.num_workers
a__ : Dict = {'''shards''': [F'shard_{shard_idx}' for shard_idx in range(lowerCAmelCase__ )]}
a__ : Tuple = IterableDataset.from_generator(lowerCAmelCase__ , gen_kwargs=lowerCAmelCase__ )
if not streaming:
a__ : str = Dataset.from_list(list(lowerCAmelCase__ ) )
a__ : Optional[int] = split_dataset_by_node(lowerCAmelCase__ , rank=lowerCAmelCase__ , world_size=lowerCAmelCase__ )
a__ : Dict = torch.utils.data.DataLoader(lowerCAmelCase__ , num_workers=lowerCAmelCase__ )
a__ : str = NUM_SHARDS * NUM_ITEMS_PER_SHARD
a__ : Dict = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
a__ : str = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(F'local_size {local_size} != expected_local_size {expected_local_size}' )
if __name__ == "__main__":
main()
| 688 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
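# Illustration only (an assumption, not transformers' actual _LazyModule): the same
# deferred-import idea can be sketched with importlib. Nothing is imported at package
# import time; each submodule is resolved on the first attribute access.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)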
| 76 |
'''simple docstring'''
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
__SCREAMING_SNAKE_CASE = open # noqa: we just need to have a builtin inside this module to test it properly
| 688 | 0 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
# TODO: upload to AWS
a : List[str] = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE = """retribert"""
def __init__( self : List[str] , a_ : Dict=30_522 , a_ : Any=768 , a_ : Union[str, Any]=8 , a_ : Dict=12 , a_ : Union[str, Any]=3_072 , a_ : str="gelu" , a_ : List[Any]=0.1 , a_ : Union[str, Any]=0.1 , a_ : Tuple=512 , a_ : Tuple=2 , a_ : Optional[Any]=0.02 , a_ : int=1e-12 , a_ : Dict=True , a_ : List[str]=128 , a_ : Tuple=0 , **a_ : Union[str, Any] , ):
"""simple docstring"""
super().__init__(pad_token_id=A__ , **A__ )
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = hidden_act
__snake_case = intermediate_size
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = share_encoders
__snake_case = projection_dim
| 69 |
'''simple docstring'''
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()

CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines: int, direction: str):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
| 688 | 0 |
def twos_complement(number: int) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 604 |
'''simple docstring'''
import inspect
import unittest
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Dict ) -> Dict:
'''simple docstring'''
try:
import diffusers # noqa: F401
except ImportError:
assert False
def __lowerCAmelCase ( self : int ) -> str:
'''simple docstring'''
import diffusers
from diffusers.dependency_versions_table import deps
a__ : Optional[int] = inspect.getmembers(A__ , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
a__ : int = '''k-diffusion'''
elif backend == "invisible_watermark":
a__ : int = '''invisible-watermark'''
assert backend in deps, F'{backend} is not in the deps table!'
| 688 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_ddp.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf_dist.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7},
},
] )
class A_ ( unittest.TestCase ):
def lowerCAmelCase ( self : List[str]):
if self.framework == "pytorch":
subprocess.run(
F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() ,encoding='utf-8' ,check=A__ ,)
assert hasattr(self ,'env')
def lowerCAmelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : int):
__lowerCamelCase : int = F"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
# distributed data settings
__lowerCamelCase : str = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
# creates estimator
return HuggingFace(
entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=A__ ,instance_count=A__ ,instance_type=self.instance_type ,debugger_hook_config=A__ ,hyperparameters={**self.env.distributed_hyperparameters, 'model_name_or_path': self.model_name_or_path} ,metric_definitions=self.env.metric_definitions ,distribution=A__ ,py_version='py36' ,)
def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[Any]):
TrainingJobAnalytics(A__).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv")
@parameterized.expand([(2,)])
def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : Dict):
__lowerCamelCase : Optional[Any] = self.create_estimator(A__)
# run training
estimator.fit()
# result dataframe
__lowerCamelCase : List[Any] = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
__lowerCamelCase : str = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'])
__lowerCamelCase : Union[str, Any] = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__lowerCamelCase : List[str] = (
Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds' ,9_9_9_9_9_9)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy)
assert all(t <= self.results['eval_loss'] for t in eval_loss)
# dump tests result into json file to share in PR
with open(F"{estimator.latest_training_job.name}.json" ,'w') as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} ,A__)
| 652 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def __a ( lowerCAmelCase__ : Dict ):
a__ , a__ : int = image.size
a__ , a__ : List[str] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
a__ : Tuple = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
a__ : List[Any] = np.array(lowerCAmelCase__ ).astype(np.floataa ) / 255.0
a__ : Any = image[None].transpose(0 , 3 , 1 , 2 )
a__ : Dict = torch.from_numpy(lowerCAmelCase__ )
return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    r"""
    A pipeline for image super-resolution using latent diffusion, built from a VQ-VAE, a U-Net and a scheduler.
    """

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
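# Usage sketch: structurally this class matches diffusers' LDM super-resolution
# pipeline, so the standard 4x checkpoint should load into it; the checkpoint
# and file names below are assumptions.
if __name__ == "__main__":
    pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
    low_res = PIL.Image.open("low_res.png").convert("RGB").resize((128, 128))
    upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
    upscaled.save("upscaled.png")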
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/mbart-large-en-ro''': 1024,
'''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizer(PreTrainedTokenizer):
    """
    Construct an MBART tokenizer, based on SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, tokenizer_file=tokenizer_file,
            src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by translation pipeline, to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
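# Usage sketch (standard MBart checkpoint): tokenize an English source sentence
# for en->ro translation. Source sequences are suffixed with "</s> en_XX", per
# `set_src_lang_special_tokens` above.
if __name__ == "__main__":
    tok = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
    batch = tok("UN Chief Says There Is No Plan to Stop Chemical Weapons in Syria", return_tensors="pt")
    print(batch["input_ids"])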
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> import torch\n    >>> import numpy as np\n\n    >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n    >>> from transformers import pipeline\n    >>> from diffusers.utils import load_image\n\n\n    >>> def make_hint(image, depth_estimator):\n    ...     image = depth_estimator(image)["depth"]\n    ...     image = np.array(image)\n    ...     image = image[:, :, None]\n    ...     image = np.concatenate([image, image, image], axis=2)\n    ...     detected_map = torch.from_numpy(image).float() / 255.0\n    ...     hint = detected_map.permute(2, 0, 1)\n    ...     return hint\n\n\n    >>> depth_estimator = pipeline("depth-estimation")\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n    ... )\n    >>> pipe_prior = pipe_prior.to("cuda")\n\n    >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n    ... )\n    >>> pipe = pipe.to("cuda")\n\n\n    >>> img = load_image(\n    ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n    ...     "/kandinsky/cat.png"\n    ... ).resize((768, 768))\n\n    >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n    >>> prompt = "A robot, 4k photo"\n    >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n    >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n    >>> image_emb, zero_image_emb = pipe_prior(\n    ...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n    ... ).to_tuple()\n\n    >>> images = pipe(\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     hint=hint,\n    ...     num_inference_steps=50,\n    ...     generator=generator,\n    ...     height=768,\n    ...     width=768,\n    ... ).images\n\n    >>> images[0].save("robot_cat.png")\n    ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
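# A quick check of the helper above with the default scale factor of 8:
# 768 // 8**2 = 12, then 12 * 8 = 96, and dimensions that are not multiples of
# 64 are rounded up to the next latent row/column.
assert downscale_height_and_width(768, 768) == (96, 96)
assert downscale_height_and_width(770, 770) == (104, 104)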
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    """
    Pipeline for image generation with Kandinsky 2.2 ControlNet conditioning.
    """

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Heuristic for the A* algorithm."""
        dy = self.pos_x - self.goal_x
        dx = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class AStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]
    def get_successors(self, parent: Node) -> list[Node]:
        """Returns a list of successors (both in the grid and free spaces)."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors
    def retrace_path(self, node: Node | None) -> list[TPosition]:
        """Retrace the path from parents to parents until start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False
    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]
    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
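# A small sanity check of the heuristic (not part of the original demo): from
# (0, 0) to a goal at x=3, y=4 the Euclidean heuristic (HEURISTIC == 0, the
# default) gives 5.0, while the Manhattan variant would give 7.
_node = Node(pos_x=0, pos_y=0, goal_x=3, goal_y=4, g_cost=0, parent=None)
assert _node.calculate_heuristic() == 5.0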
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}
def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
        return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    """
    Constructs an ESM tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)
    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )

            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
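# Usage sketch (real ESM-2 checkpoint name): the vocabulary holds one token per
# amino acid, so a protein sequence is tokenized residue by residue.
if __name__ == "__main__":
    tok = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
    enc = tok("MKTVRQERLKSIVRILERSKEPVSGAQ")
    print(enc["input_ids"])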
def decimal_to_fraction(decimal: float | str) -> tuple[int, int]:
    """Return the given decimal as a (numerator, denominator) pair in lowest terms."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # reduce by the greatest common divisor, found with Euclid's algorithm
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
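# A quick property check (in addition to the demo below): the returned pair is
# in lowest terms, i.e. its gcd is 1.
from math import gcd

_num, _den = decimal_to_fraction(6.25)
assert (_num, _den) == (25, 4) and gcd(_num, _den) == 1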
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction("67") = }''')
print(F'''{decimal_to_fraction("45.0") = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction("6.25") = }''')
print(F'''{decimal_to_fraction("78td") = }''')
'''simple docstring'''
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    """
    A distributed retriever built on top of the Ray API: retrieval is spread
    over several Ray actor workers so that it does not block training.
    """

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )
    def init_retrieval(self):
        logger.info("initializing retrieval")

        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
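# Usage sketch (hypothetical worker count; "facebook/rag-token-nq" is the
# standard RAG checkpoint): create the Ray actors first, then build the
# retriever from their handles.
if __name__ == "__main__":
    ray.init()
    workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
    retriever = RagRayDistributedRetriever.from_pretrained("facebook/rag-token-nq", actor_handles=workers)
    retriever.init_retrieval()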
'''simple docstring'''
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's an important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
# used during training (despite we don't have training script for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Check if any name in `attributes` is used in one of the strings in `source_strings`."""
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    """Check the arguments in `__init__` of `config_class` that are not used in the modeling files."""
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    """Check the arguments in `__init__` of all configuration classes that are not used in the modeling files."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
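# A small illustration of the core check (hypothetical attribute names): an
# attribute counts as used as soon as it appears as `config.x` or
# `getattr(config, "x", ...)` in any modeling source string.
_sources = ["h = config.hidden_size", 'if getattr(config, "use_cache", True): pass']
assert check_attribute_being_used(PretrainedConfig, ["hidden_size"], None, _sources)
assert not check_attribute_being_used(PretrainedConfig, ["mystery_attr"], None, _sources)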
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
_CITATION = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ),reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""],)
    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
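# A direct scipy cross-check of the docstring example above; the metric is a
# thin wrapper around `scipy.stats.spearmanr`.
if __name__ == "__main__":
    rho, pvalue = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
    assert round(rho, 1) == -0.7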
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
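# Behavior sketch: when the onnx backend is missing, the dummy object defers
# the failure from import time to first use, raising an informative ImportError.
if __name__ == "__main__":
    try:
        OnnxRuntimeModel()
    except ImportError as err:
        print(err)  # explains how to install the missing onnx backend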
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)
    def __getitem__( self,i ):
        '''simple docstring'''
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids,dtype=torch.long )
        attention_mask = torch.tensor(feature.attention_mask,dtype=torch.long )
        token_type_ids = torch.tensor(feature.token_type_ids,dtype=torch.long )
        cls_index = torch.tensor(feature.cls_index,dtype=torch.long )
        p_mask = torch.tensor(feature.p_mask,dtype=torch.float )
        is_impossible = torch.tensor(feature.is_impossible,dtype=torch.float )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": attention_mask,
            """token_type_ids""": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} )
        if self.args.version_2_with_negative:
            inputs.update({"""is_impossible""": is_impossible} )
        if self.is_language_sensitive:
            inputs.update({"""langs""": (torch.ones(input_ids.shape,dtype=torch.int64 ) * self.args.lang_id)} )
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position,dtype=torch.long )
            end_positions = torch.tensor(feature.end_position,dtype=torch.long )
            inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} )
return inputs
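# Usage sketch (added for illustration; not part of the original file). The argument
# values are hypothetical and assume `args.data_dir` points at a local SQuAD copy;
# the data-arguments dataclass above is the upstream `SquadDataTrainingArguments`.
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-uncased""" )
    squad_args = SquadDataTrainingArguments(model_type="""bert""",data_dir="""./squad_data""" )  # hypothetical path
    train_dataset = SquadDataset(squad_args,tokenizer,mode=Split.train )
    loader = DataLoader(train_dataset,batch_size=8,shuffle=True )
    print(next(iter(loader ) )["""input_ids"""].shape )  # (8, max_seq_length)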
| 689 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_a : int = logging.get_logger(__name__)
_a : Optional[int] = {
"""EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig ( PretrainedConfig ):
    model_type ="""gptj"""
    attribute_map ={
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__( self,vocab_size=5_04_00,n_positions=20_48,n_embd=40_96,n_layer=28,n_head=16,rotary_dim=64,n_inner=None,activation_function="gelu_new",resid_pdrop=0.0,embd_pdrop=0.0,attn_pdrop=0.0,layer_norm_epsilon=1e-5,initializer_range=0.02,use_cache=True,bos_token_id=5_02_56,eos_token_id=5_02_56,tie_word_embeddings=False,**kwargs,):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id,eos_token_id=eos_token_id,tie_word_embeddings=tie_word_embeddings,**kwargs )
class GPTJOnnxConfig ( OnnxConfigWithPast ):
    def __init__( self,config,task = "default",patching_specs = None,use_past = False,):
        '''simple docstring'''
        super().__init__(config,task=task,patching_specs=patching_specs,use_past=use_past )
        if not getattr(self._config,"""pad_token_id""",None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs( self ):
        '''simple docstring'''
        common_inputs = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs,direction="""inputs""" )
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_sequence + sequence"""}
        else:
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """sequence"""}
        return common_inputs
    @property
    def num_layers( self ):
        '''simple docstring'''
        return self._config.n_layer
    @property
    def num_attention_heads( self ):
        '''simple docstring'''
        return self._config.n_head
    def generate_dummy_inputs( self,tokenizer,batch_size = -1,seq_length = -1,is_pair = False,framework = None,):
        '''simple docstring'''
        common_inputs = super(OnnxConfigWithPast,self ).generate_dummy_inputs(
            tokenizer,batch_size=batch_size,seq_length=seq_length,is_pair=is_pair,framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch
                batch, seqlen = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["""past_key_values"""] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs["""attention_mask"""] = common_inputs["""attention_mask"""]
        if self.use_past:
            mask_dtype = ordered_inputs["""attention_mask"""].dtype
            ordered_inputs["""attention_mask"""] = torch.cat(
                [ordered_inputs["""attention_mask"""], torch.ones(batch,past_key_values_length,dtype=mask_dtype )],dim=1 )
        return ordered_inputs
    @property
    def default_onnx_opset( self ):
        '''simple docstring'''
        return 13
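# Usage sketch (added for illustration): inspect the dynamic-axis spec the ONNX
# config above produces when past key values are enabled. The small config values
# are arbitrary.
if __name__ == "__main__":
    config = GPTJConfig(n_layer=2,n_head=4,n_embd=1_28 )
    onnx_config = GPTJOnnxConfig(config,task="""default""",use_past=True )
    print(onnx_config.inputs )  # input_ids, past_key_values.*.key/value, attention_mask
    print(onnx_config.num_layers,onnx_config.num_attention_heads )  # 2 4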
| 689 | 1 |
'''simple docstring'''
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact( artifact_path , targets ):
    selected_warnings = set()
    buffer = []
    def parse_line(fp ):
        for line in fp:
            if isinstance(line , bytes ):
                line = line.decode("""UTF-8""" )
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(""" """ ):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer ) > 0:
                    warning = """\n""".join(buffer )
                    # Only keep the warnings specified in `targets`
                    if any(f': {x}: ' in warning for x in targets ):
                        selected_warnings.add(warning )
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line )
    if from_gh:
        for filename in os.listdir(artifact_path ):
            file_path = os.path.join(artifact_path , filename )
            if not os.path.isdir(file_path ):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path ) as fp:
                    parse_line(fp )
    else:
        try:
            with zipfile.ZipFile(artifact_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename ) as fp:
                            parse_line(fp )
        except Exception:
            logger.warning(
                f'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' )
    return selected_warnings
def extract_warnings( artifact_dir , targets ):
    selected_warnings = set()
    paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if (p.endswith(""".zip""" ) or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p , targets ) )
    return selected_warnings
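# Quick illustration (added): a warning block is kept only when one of the target
# categories appears in the ": <category>: " position of a pytest warning line.
_example_line = "path/to/test_foo.py:12: DeprecationWarning: foo() is deprecated"
assert any(f": {x}: " in _example_line for x in ["DeprecationWarning", "UserWarning"])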
if __name__ == "__main__":
    def list_str( values ):
        return values.split(""",""" )
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 8_0)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 689 |
'''simple docstring'''
def solution( limit = 5000_0000 ) -> int:
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2) )
    primes = set(range(3 , prime_square_limit + 1 , 2 ) )
    primes.add(2 )
    for p in range(3 , prime_square_limit + 1 , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , prime_square_limit + 1 , p ) ) )
    for primea in primes:
        square = primea * primea
        for primeb in primes:
            cube = primeb * primeb * primeb
            if square + cube >= limit - 16:
                break
            for primec in primes:
                tetr = primec * primec * primec * primec
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total )
    return len(ret )
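# Cross-check (added) against the stated example of Project Euler 87: exactly four
# numbers below fifty are expressible as p**2 + q**3 + r**4 with p, q, r prime.
from itertools import product
_small = {
    p * p + q ** 3 + r ** 4
    for p, q, r in product([2, 3, 5] , repeat=3 )
    if p * p + q ** 3 + r ** 4 < 50
}
assert _small == {28, 33, 47, 49}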
if __name__ == "__main__":
print(f'{solution() = }')
| 689 | 1 |
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("""[^A-Za-z_0-9]""")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash( tokens ) -> Optional[MinHash]:
    if len(tokens ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(tokens ):
        min_hash.update(token.encode() )
    return min_hash
def get_tokens( code ) -> Set[str]:
    return {t for t in NON_ALPHA.split(code ) if len(t.strip() ) > 0}
class DuplicationIndex :
    def __init__( self,*,duplication_jaccard_threshold = 0.85,):
        '''simple docstring'''
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold,num_perm=self._num_perm )
        self._duplicate_clusters = defaultdict(set )
    def add( self,code_key,min_hash ):
        '''simple docstring'''
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(f'Duplicate key {code_key}' )
            return
        self._index.insert(code_key,min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key )
    def get_duplicate_clusters( self ):
        '''simple docstring'''
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters
    def save( self,filepath ):
        '''simple docstring'''
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath,"""w""" ) as f:
            json.dump(duplicate_clusters,f )
def _compute_min_hash( element ):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter( dataset_iterator ):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(dataset_iterator , max_queue_size=1_0000 ) , chunksize=100 , ):
            if data is not None:
                yield data
def make_duplicate_clusters( dataset_iterator , jaccard_threshold ):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ) , max_queue_size=100 ) ):
        di.add(filename , min_hash )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity( code_a , code_b ) -> float:
    tokens_a = get_tokens(code_a )
    tokens_b = get_tokens(code_b )
    return len(tokens_a & tokens_b ) / len(tokens_a | tokens_b )
_shared_dataset = None
def _find_cluster_extremes_shared( cluster , jaccard_threshold ):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["""base_index"""]]["""content"""]
        for element2 in extremes:
            code2 = _shared_dataset[element2["""base_index"""]]["""content"""]
            if jaccard_similarity(code1 , code2 ) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["""copies"""] = 1
            extremes.append(element1 )
    return extremes
def find_extremes( cluster_list , dataset , jaccard_threshold ):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list ) , ):
            extremes_list.append(extremes )
    return extremes_list
def deduplicate_dataset( dataset , jaccard_threshold = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold )
    duplicate_indices = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["""base_index"""]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["""is_extreme"""] = element["""base_index"""] in extreme_dict
            if element["is_extreme"]:
                element["""copies"""] = extreme_dict[element["""base_index"""]]["""copies"""]
    print(f'Original dataset size: {len(dataset )}' )
    print(f'Number of duplicate clusters: {len(duplicate_clusters )}' )
    print(f'Files in duplicate cluster: {len(duplicate_indices )}' )
    print(f'Unique files in duplicate cluster: {len(extreme_dict )}' )
    print(f'Filtered dataset size: {len(ds_filter )}' )
    return ds_filter, duplicate_clusters
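# Usage sketch (added): token-level Jaccard similarity on two near-duplicate snippets.
if __name__ == "__main__":
    _code_a = """def add(a, b):\n    return a + b"""
    _code_b = """def add(x, y):\n    return x + y"""
    print(jaccard_similarity(_code_a , _code_b ) )  # 3 shared tokens / 7 distinct -> ~0.43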
| 689 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _UpperCAmelCase ( lowerCAmelCase_ , unittest.TestCase ):
    pipeline_class =TextToVideoSDPipeline
    params =TEXT_TO_IMAGE_PARAMS
    batch_params =TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params =frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
    def get_dummy_components( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),layers_per_block=2,sample_size=32,in_channels=4,out_channels=4,down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D"""),up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D"""),cross_attention_dim=32,attention_head_dim=4,)
        scheduler = DDIMScheduler(
            beta_start=0.0_0085,beta_end=0.012,beta_schedule="""scaled_linear""",clip_sample=False,set_alpha_to_one=False,)
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],latent_channels=4,sample_size=1_28,)
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1e-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=10_00,hidden_act="""gelu""",projection_dim=5_12,)
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
        }
        return components
    def get_dummy_inputs( self,device,seed=0 ):
        '''simple docstring'''
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """pt""",
        }
        return inputs
    def test_text_to_video_default_case( self ):
        '''simple docstring'''
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs["""output_type"""] = """np"""
        frames = sd_pipe(**inputs ).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_attention_slicing_forward_pass( self ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE,expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available(),reason="""XFormers attention is only available with CUDA and `xformers` installed""",)
    def test_xformers_attention_forwardGenerator_pass( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE,expected_max_diff=1e-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
    def test_inference_batch_single_identity( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
    def test_inference_batch_consistent( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
    def test_num_images_per_prompt( self ):
'''simple docstring'''
pass
    def test_progress_bar( self ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class _UpperCAmelCase ( unittest.TestCase ):
    def test_full_model( self ):
        '''simple docstring'''
        expected_video = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
        pipe = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe = pipe.to("""cuda""" )
        prompt = """Spiderman is surfing"""
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        video_frames = pipe(prompt,generator=generator,num_inference_steps=25,output_type="""pt""" ).frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5e-2
    def test_two_step_model( self ):
        '''simple docstring'''
        expected_video = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
        pipe = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
        pipe = pipe.to("""cuda""" )
        prompt = """Spiderman is surfing"""
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        video_frames = pipe(prompt,generator=generator,num_inference_steps=2,output_type="""pt""" ).frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5e-2
| 689 | 1 |
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager :
    def __init__( self,cache_dir = None ):
        '''simple docstring'''
        self.extract_dir = (
            os.path.join(cache_dir,config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor
    def _get_output_path( self,path ):
        '''simple docstring'''
        from .file_utils import hash_url_to_filename
        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path )
        return os.path.join(self.extract_dir,hash_url_to_filename(abs_path ) )
    def _do_extract( self,output_path,force_extract ):
        '''simple docstring'''
        return force_extract or (
            not os.path.isfile(output_path ) and not (os.path.isdir(output_path ) and os.listdir(output_path ))
        )
    def extract( self,input_path,force_extract = False ):
        '''simple docstring'''
        extractor_format = self.extractor.infer_extractor_format(input_path )
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path )
        if self._do_extract(output_path,force_extract ):
            self.extractor.extract(input_path,output_path,extractor_format )
        return output_path
class BaseExtractor ( ABC ):
    @classmethod
    @abstractmethod
    def is_extractable( cls,path,**kwargs ):
        '''simple docstring'''
        ...
    @staticmethod
    @abstractmethod
    def extract( input_path,output_path ):
        '''simple docstring'''
        ...
class MagicNumberBaseExtractor ( BaseExtractor , ABC ):
    magic_numbers : List[bytes] =[]
    @staticmethod
    def read_magic_number( path,magic_number_length ):
        '''simple docstring'''
        with open(path,"""rb""" ) as f:
            return f.read(magic_number_length )
    @classmethod
    def is_extractable( cls,path,magic_number = b"" ):
        '''simple docstring'''
        if not magic_number:
            magic_number_length = max(len(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
            try:
                magic_number = cls.read_magic_number(path,magic_number_length )
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
class TarExtractor ( BaseExtractor ):
    @classmethod
    def is_extractable( cls,path,**kwargs ):
        '''simple docstring'''
        return tarfile.is_tarfile(path )
    @staticmethod
    def safemembers( members,output_path ):
        '''simple docstring'''
        def resolved(path ) -> str:
            return os.path.realpath(os.path.abspath(path ) )
        def badpath(path,base ) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base,path ) ).startswith(base )
        def badlink(info,base ) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base,os.path.dirname(info.name ) ) )
            return badpath(info.linkname,base=tip )
        base = resolved(output_path )
        for finfo in members:
            if badpath(finfo.name,base ):
                logger.error(f'Extraction of {finfo.name} is blocked (illegal path)' )
            elif finfo.issym() and badlink(finfo,base ):
                logger.error(f'Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}' )
            elif finfo.islnk() and badlink(finfo,base ):
                logger.error(f'Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}' )
            else:
                yield finfo
    @staticmethod
    def extract( input_path,output_path ):
        '''simple docstring'''
        os.makedirs(output_path,exist_ok=True )
        tar_file = tarfile.open(input_path )
        tar_file.extractall(output_path,members=TarExtractor.safemembers(tar_file,output_path ) )
        tar_file.close()
class GzipExtractor ( MagicNumberBaseExtractor ):
    magic_numbers =[b"""\x1F\x8B"""]
    @staticmethod
    def extract( input_path,output_path ):
        '''simple docstring'''
        with gzip.open(input_path,"""rb""" ) as gzip_file:
            with open(output_path,"""wb""" ) as extracted_file:
                shutil.copyfileobj(gzip_file,extracted_file )
class ZipExtractor ( MagicNumberBaseExtractor ):
    magic_numbers =[
        b"""PK\x03\x04""",
        b"""PK\x05\x06""", # empty archive
        b"""PK\x07\x08""", # spanned archive
    ]
    @classmethod
    def is_extractable( cls,path,magic_number = b"" ):
        '''simple docstring'''
        if super().is_extractable(path,magic_number=magic_number ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
            with open(path,"""rb""" ) as fp:
                endrec = _EndRecData(fp )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir ) # CD is where we expect it to be
                            if len(data ) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir,data ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
    @staticmethod
    def extract( input_path,output_path ):
        '''simple docstring'''
        os.makedirs(output_path,exist_ok=True )
        with zipfile.ZipFile(input_path,"""r""" ) as zip_file:
            zip_file.extractall(output_path )
            zip_file.close()
class XzExtractor ( MagicNumberBaseExtractor ):
    magic_numbers =[b"""\xFD\x37\x7A\x58\x5A\x00"""]
    @staticmethod
    def extract( input_path,output_path ):
        '''simple docstring'''
        with lzma.open(input_path ) as compressed_file:
            with open(output_path,"""wb""" ) as extracted_file:
                shutil.copyfileobj(compressed_file,extracted_file )
class RarExtractor ( MagicNumberBaseExtractor ):
    magic_numbers =[b"""Rar!\x1a\x07\x00""", b"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID
    @staticmethod
    def extract( input_path,output_path ):
        '''simple docstring'''
        if not config.RARFILE_AVAILABLE:
            raise ImportError("""Please pip install rarfile""" )
        import rarfile
        os.makedirs(output_path,exist_ok=True )
        rf = rarfile.RarFile(input_path )
        rf.extractall(output_path )
        rf.close()
class ZstdExtractor ( MagicNumberBaseExtractor ):
    magic_numbers =[b"""\x28\xb5\x2F\xFD"""]
    @staticmethod
    def extract( input_path,output_path ):
        '''simple docstring'''
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("""Please pip install zstandard""" )
        import zstandard as zstd
        dctx = zstd.ZstdDecompressor()
        with open(input_path,"""rb""" ) as ifh, open(output_path,"""wb""" ) as ofh:
            dctx.copy_stream(ifh,ofh )
class Bzip2Extractor ( MagicNumberBaseExtractor ):
    magic_numbers =[b"""\x42\x5A\x68"""]
    @staticmethod
    def extract( input_path,output_path ):
        '''simple docstring'''
        with bz2.open(input_path,"""rb""" ) as compressed_file:
            with open(output_path,"""wb""" ) as extracted_file:
                shutil.copyfileobj(compressed_file,extracted_file )
class SevenZipExtractor ( MagicNumberBaseExtractor ):
    magic_numbers =[b"""\x37\x7A\xBC\xAF\x27\x1C"""]
    @staticmethod
    def extract( input_path,output_path ):
        '''simple docstring'''
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("""Please pip install py7zr""" )
        import py7zr
        os.makedirs(output_path,exist_ok=True )
        with py7zr.SevenZipFile(input_path,"""r""" ) as archive:
            archive.extractall(output_path )
class Lz4Extractor ( MagicNumberBaseExtractor ):
    magic_numbers =[b"""\x04\x22\x4D\x18"""]
    @staticmethod
    def extract( input_path,output_path ):
        '''simple docstring'''
        if not config.LZ4_AVAILABLE:
            raise ImportError("""Please pip install lz4""" )
        import lz4.frame
        with lz4.frame.open(input_path,"""rb""" ) as compressed_file:
            with open(output_path,"""wb""" ) as extracted_file:
                shutil.copyfileobj(compressed_file,extracted_file )
class Extractor :
    # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    extractors : Dict[str, Type[BaseExtractor]] ={
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor, # <Added version="2.4.0"/>
        "lz4": Lz4Extractor, # <Added version="2.4.0"/>
    }
    @classmethod
    def _get_magic_number_max_length( cls ):
        '''simple docstring'''
        return max(
            len(extractor_magic_number )
            for extractor in cls.extractors.values()
            if issubclass(extractor,MagicNumberBaseExtractor )
            for extractor_magic_number in extractor.magic_numbers )
    @staticmethod
    def _read_magic_number( path,magic_number_length ):
        '''simple docstring'''
        try:
            return MagicNumberBaseExtractor.read_magic_number(path,magic_number_length=magic_number_length )
        except OSError:
            return b""
    @classmethod
    def is_extractable( cls,path,return_extractor = False ):
        '''simple docstring'''
        warnings.warn(
            """Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
            """Use 'infer_extractor_format' instead.""",category=FutureWarning,)
        extractor_format = cls.infer_extractor_format(path )
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)
    @classmethod
    def infer_extractor_format( cls,path ): # <Added version="2.4.0"/>
        '''simple docstring'''
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path,magic_number_max_length )
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path,magic_number=magic_number ):
                return extractor_format
    @classmethod
    def extract( cls,input_path,output_path,extractor_format = None,extractor = "deprecated",):
        '''simple docstring'''
        os.makedirs(os.path.dirname(output_path ),exist_ok=True )
        # Prevent parallel extractions
        lock_path = str(Path(output_path ).with_suffix(""".lock""" ) )
        with FileLock(lock_path ):
            shutil.rmtree(output_path,ignore_errors=True )
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format,str ): # passed as positional arg
                    warnings.warn(
                        """Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
                        """Use 'extractor_format' instead.""",category=FutureWarning,)
                    extractor_format = extractor if extractor != """deprecated""" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                    return extractor.extract(input_path,output_path )
            else:
                warnings.warn(
                    """Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
                    """exception in 3.0.0.""",category=FutureWarning,)
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path ):
                        return extractor.extract(input_path,output_path )
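# Standalone sketch (added for illustration): the magic-number dispatch idea used by
# the extractor registry above, reduced to a plain dict lookup over file headers.
def _sniff_format_demo( path ):
    markers = {b"""\x1F\x8B""": """gzip""", b"""PK\x03\x04""": """zip""", b"""\x42\x5A\x68""": """bz2"""}
    with open(path,"""rb""" ) as f:
        head = f.read(4 )
    for marker, fmt in markers.items():
        if head.startswith(marker ):
            return fmt
    return None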
| 689 |
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook( method ):
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__ ).base_version
    if version.parse(accelerate_version ) < version.parse("""0.17.0""" ):
        return method
    def wrapper(self , *args , **kwargs ):
        if hasattr(self , """_hf_hook""" ) and hasattr(self._hf_hook , """pre_forward""" ):
            self._hf_hook.pre_forward(self )
        return method(self , *args , **kwargs )
    return wrapper
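# Usage sketch (added for illustration): a method decorated with `apply_forward_hook`
# triggers a pending accelerate pre-forward hook (if one was attached) before running.
if __name__ == "__main__":
    class _Demo:
        @apply_forward_hook
        def encode(self , x ):
            return x * 2
    print(_Demo().encode(21 ) )  # 42; the hook branch only fires when accelerate attached `_hf_hook`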
| 689 | 1 |
'''simple docstring'''
_a : List[Any] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_a : Union[str, Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
_a : List[str] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 689 |
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert( base_model_path , checkpoint_path , lora_prefix_unet , lora_prefix_text_encoder , alpha ):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path , torch_dtype=torch.float32 )
    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path )
    visited = []
    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue
        if "text" in key:
            layer_infos = key.split(""".""" )[0].split(lora_prefix_text_encoder + """_""" )[-1].split("""_""" )
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(""".""" )[0].split(lora_prefix_unet + """_""" )[-1].split("""_""" )
            curr_layer = pipeline.unet
        # find the target layer
        temp_name = layer_infos.pop(0 )
        while len(layer_infos ) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name )
                if len(layer_infos ) > 0:
                    temp_name = layer_infos.pop(0 )
                elif len(layer_infos ) == 0:
                    break
            except Exception:
                if len(temp_name ) > 0:
                    temp_name += "_" + layer_infos.pop(0 )
                else:
                    temp_name = layer_infos.pop(0 )
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("""lora_down""" , """lora_up""" ) )
            pair_keys.append(key )
        else:
            pair_keys.append(key )
            pair_keys.append(key.replace("""lora_up""" , """lora_down""" ) )
        # update weight
        if len(state_dict[pair_keys[0]].shape ) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down ).unsqueeze(2 ).unsqueeze(3 )
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down )
        # update visited list
        for item in pair_keys:
            visited.append(item )
    return pipeline
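# Minimal numeric sketch (added) of the low-rank update applied in `convert` above:
# each LoRA pair contributes a rank-r correction W += alpha * (up @ down).
def _lora_update_demo() -> None:
    w = torch.zeros(8 , 8 )
    up, down = torch.randn(8 , 2 ), torch.randn(2 , 8 )  # rank-2 factors
    w += 0.75 * torch.mm(up , down )
    assert int(torch.linalg.matrix_rank(w ) ) <= 2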
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    args = parser.parse_args()
    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha
    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 689 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
    import boto3 # noqa: F401
def _create_iam_role_for_sagemaker( role_name ):
    iam_client = boto3.client("""iam""" )
    sagemaker_trust_policy = {
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name , AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy , indent=2 ) )
        policy_document = {
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name , PolicyName=f'{role_name}_policy_permission' , PolicyDocument=json.dumps(policy_document , indent=2 ) , )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f'role {role_name} already exists. Using existing one' )
def _get_iam_role_arn( role_name ):
    iam_client = boto3.client("""iam""" )
    return iam_client.get_role(RoleName=role_name )["Role"]["Arn"]
def _lowerCAmelCase ( ) -> Dict:
__lowerCAmelCase = _ask_options(
"""How do you want to authorize?""" , ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] , lowercase , )
__lowerCAmelCase = None
if credentials_configuration == 0:
__lowerCAmelCase = _ask_field("""Enter your AWS Profile name: [default] """ , default="""default""" )
__lowerCAmelCase = aws_profile
else:
print(
"""Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"""
"""`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" )
__lowerCAmelCase = _ask_field("""AWS Access Key ID: """ )
__lowerCAmelCase = aws_access_key_id
__lowerCAmelCase = _ask_field("""AWS Secret Access Key: """ )
__lowerCAmelCase = aws_secret_access_key
__lowerCAmelCase = _ask_field("""Enter your AWS Region: [us-east-1]""" , default="""us-east-1""" )
__lowerCAmelCase = aws_region
__lowerCAmelCase = _ask_options(
"""Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" , ["""Provide IAM Role name""", """Create new IAM role using credentials"""] , lowercase , )
if role_management == 0:
__lowerCAmelCase = _ask_field("""Enter your IAM role name: """ )
else:
__lowerCAmelCase = """accelerate_sagemaker_execution_role"""
print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials' )
_create_iam_role_for_sagemaker(lowercase )
__lowerCAmelCase = _ask_field(
"""Do you want to use custom Docker image? [yes/NO]: """ , _convert_yes_no_to_bool , default=lowercase , error_message="""Please enter yes or no.""" , )
__lowerCAmelCase = None
if is_custom_docker_image:
__lowerCAmelCase = _ask_field("""Enter your Docker image: """ , lambda lowercase : str(lowercase ).lower() )
__lowerCAmelCase = _ask_field(
"""Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ , _convert_yes_no_to_bool , default=lowercase , error_message="""Please enter yes or no.""" , )
__lowerCAmelCase = None
if is_sagemaker_inputs_enabled:
__lowerCAmelCase = _ask_field(
"""Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ , lambda lowercase : str(lowercase ).lower() , )
__lowerCAmelCase = _ask_field(
"""Do you want to enable SageMaker metrics? [yes/NO]: """ , _convert_yes_no_to_bool , default=lowercase , error_message="""Please enter yes or no.""" , )
__lowerCAmelCase = None
if is_sagemaker_metrics_enabled:
__lowerCAmelCase = _ask_field(
"""Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ , lambda lowercase : str(lowercase ).lower() , )
__lowerCAmelCase = _ask_options(
"""What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , )
__lowerCAmelCase = {}
__lowerCAmelCase = _ask_field(
"""Do you wish to optimize your script with torch dynamo?[yes/NO]:""" , _convert_yes_no_to_bool , default=lowercase , error_message="""Please enter yes or no.""" , )
if use_dynamo:
__lowerCAmelCase = """dynamo_"""
__lowerCAmelCase = _ask_options(
"""Which dynamo backend would you like to use?""" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
__lowerCAmelCase = _ask_field(
"""Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ , _convert_yes_no_to_bool , default=lowercase , error_message="""Please enter yes or no.""" , )
if use_custom_options:
__lowerCAmelCase = _ask_options(
"""Which mode do you want to use?""" , lowercase , lambda lowercase : TORCH_DYNAMO_MODES[int(lowercase )] , default="""default""" , )
__lowerCAmelCase = _ask_field(
"""Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """ , _convert_yes_no_to_bool , default=lowercase , error_message="""Please enter yes or no.""" , )
__lowerCAmelCase = _ask_field(
"""Do you want to enable dynamic shape tracing? [yes/NO]: """ , _convert_yes_no_to_bool , default=lowercase , error_message="""Please enter yes or no.""" , )
__lowerCAmelCase = """Which EC2 instance type you want to use for your training?"""
if distributed_type != SageMakerDistributedType.NO:
__lowerCAmelCase = _ask_options(
lowercase , lowercase , lambda lowercase : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(lowercase )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
__lowerCAmelCase = _ask_field(lowercase , lambda lowercase : str(lowercase ).lower() , default="""ml.p3.2xlarge""" )
__lowerCAmelCase = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
__lowerCAmelCase = _ask_field(
"""How many machines do you want use? [1]: """ , lowercase , default=1 , )
__lowerCAmelCase = _ask_options(
"""Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"""Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" )
return SageMakerConfig(
image_uri=lowercase , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=lowercase , use_cpu=lowercase , dynamo_config=lowercase , eca_instance_type=lowercase , profile=lowercase , region=lowercase , iam_role_name=lowercase , mixed_precision=lowercase , num_machines=lowercase , sagemaker_inputs_file=lowercase , sagemaker_metrics_file=lowercase , )
| 689 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter( input_str = "" , ) -> bool:
    return sum(c % 2 for c in Counter(input_str.replace(""" """ , """""" ).lower() ).values() ) < 2
def can_string_be_rearranged_as_palindrome( input_str = "" ) -> bool:
    if len(input_str ) == 0:
        return True
    lower_case_input_str = input_str.replace(""" """ , """""" ).lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character , 0 ) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True
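# Quick self-check (added): both implementations agree on simple cases.
assert can_string_be_rearranged_as_palindrome_counter("""Momo""" ) is True
assert can_string_be_rearranged_as_palindrome("""abcd""" ) is False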
def benchmark( input_str = "" ) -> None:
    print("""\nFor string = """ , input_str , """:""" )
    print(
        """> can_string_be_rearranged_as_palindrome_counter()""" , """\tans =""" , can_string_be_rearranged_as_palindrome_counter(input_str ) , """\ttime =""" , timeit(
        """z.can_string_be_rearranged_as_palindrome_counter(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
    print(
        """> can_string_be_rearranged_as_palindrome()""" , """\tans =""" , can_string_be_rearranged_as_palindrome(input_str ) , """\ttime =""" , timeit(
        """z.can_string_be_rearranged_as_palindrome(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
if __name__ == "__main__":
    check_str = input(
        """Enter string to determine if it can be rearranged as a palindrome or not: """
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
| 689 | 1 |
'''simple docstring'''
def count_divisors( n ):
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors
def solution():
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num ) > 500:
            break
    return t_num
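# Quick self-check (added): 28 = 2**2 * 7, so it has (2 + 1) * (1 + 1) = 6 divisors.
assert count_divisors(28 ) == 6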
if __name__ == "__main__":
print(solution())
| 689 |
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config( model_name ):
    config = VideoMAEConfig()
    set_architecture_configs(model_name , config )
    if "finetuned" not in model_name:
        config.use_mean_pooling = False
    if "finetuned" in model_name:
        repo_id = """huggingface/label-files"""
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = """kinetics400-id2label.json"""
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = """something-something-v2-id2label.json"""
        else:
            raise ValueError("""Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.""" )
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    return config
def set_architecture_configs( model_name , config ):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError("""Model name should include either \"small\", \"base\", \"large\", or \"huge\"""" )
def rename_key( name ):
    if "encoder." in name:
        name = name.replace("""encoder.""" , """""" )
    if "cls_token" in name:
        name = name.replace("""cls_token""" , """videomae.embeddings.cls_token""" )
    if "decoder_pos_embed" in name:
        name = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("""pos_embed""" , """videomae.embeddings.position_embeddings""" )
    if "patch_embed.proj" in name:
        name = name.replace("""patch_embed.proj""" , """videomae.embeddings.patch_embeddings.projection""" )
    if "patch_embed.norm" in name:
        name = name.replace("""patch_embed.norm""" , """videomae.embeddings.norm""" )
    if "decoder.blocks" in name:
        name = name.replace("""decoder.blocks""" , """decoder.decoder_layers""" )
    if "blocks" in name:
        name = name.replace("""blocks""" , """videomae.encoder.layer""" )
    if "attn.proj" in name:
        name = name.replace("""attn.proj""" , """attention.output.dense""" )
    if "attn" in name and "bias" not in name:
        name = name.replace("""attn""" , """attention.self""" )
    if "attn" in name:
        name = name.replace("""attn""" , """attention.attention""" )
    if "norm1" in name:
        name = name.replace("""norm1""" , """layernorm_before""" )
    if "norm2" in name:
        name = name.replace("""norm2""" , """layernorm_after""" )
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""" , """intermediate.dense""" )
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""" , """output.dense""" )
    if "decoder_embed" in name:
        name = name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
    if "decoder_norm" in name:
        name = name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
    if "decoder_pred" in name:
        name = name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("""norm.weight""" , """videomae.layernorm.weight""" )
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("""norm.bias""" , """videomae.layernorm.bias""" )
    if "head" in name and "decoder" not in name:
        name = name.replace("""head""" , """classifier""" )
    return name
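# Quick self-check (added): the renaming above maps an original patch-embedding key
# into the transformers VideoMAE namespace.
assert rename_key("""patch_embed.proj.weight""" ) == """videomae.embeddings.patch_embeddings.projection.weight"""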
def convert_state_dict( orig_state_dict , config ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if key.startswith("""encoder.""" ):
            key = key.replace("""encoder.""" , """""" )
        if "qkv" in key:
            key_split = key.split(""".""" )
            if key.startswith("""decoder.blocks""" ):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2] )
                prefix = """decoder.decoder_layers."""
                if "weight" in key:
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.key.weight'] = val[dim : dim * 2, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1] )
                prefix = """videomae.encoder.layer."""
                if "weight" in key:
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.key.weight'] = val[dim : dim * 2, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def prepare_video():
    file = hf_hub_download(
        repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
    video = np.load(file )
    return list(video )
def convert_videomae_checkpoint( checkpoint_url , pytorch_dump_folder_path , model_name , push_to_hub ):
    config = get_videomae_config(model_name )
    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config )
    else:
        model = VideoMAEForPreTraining(config )
    # download original checkpoint, hosted on Google Drive
    output = """pytorch_model.bin"""
    gdown.cached_download(checkpoint_url , output , quiet=False )
    files = torch.load(output , map_location="""cpu""" )
    if "model" in files:
        state_dict = files["""model"""]
    else:
        state_dict = files["""module"""]
    new_state_dict = convert_state_dict(state_dict , config )
    model.load_state_dict(new_state_dict )
    model.eval()
    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
    video = prepare_video()
    inputs = image_processor(video , return_tensors="""pt""" )
    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
        inputs["""bool_masked_pos"""] = torch.load(local_path )
    outputs = model(**inputs )
    logits = outputs.logits
__lowerCAmelCase = [
"""videomae-small-finetuned-kinetics""",
"""videomae-small-finetuned-ssv2""",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"""videomae-base-short""",
"""videomae-base-short-finetuned-kinetics""",
"""videomae-base""",
"""videomae-base-finetuned-kinetics""",
"""videomae-large""",
"""videomae-large-finetuned-kinetics""",
"""videomae-huge-finetuned-kinetics""",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"""videomae-base-short-ssv2""",
"""videomae-base-short-finetuned-ssv2""",
"""videomae-base-ssv2""",
"""videomae-base-finetuned-ssv2""",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 400] )
__lowerCAmelCase = torch.tensor([-0.92_91, -0.40_61, -0.93_07] )
elif model_name == "videomae-small-finetuned-ssv2":
__lowerCAmelCase = torch.Size([1, 174] )
__lowerCAmelCase = torch.tensor([0.26_71, -0.46_89, -0.82_35] )
elif model_name == "videomae-base":
__lowerCAmelCase = torch.Size([1, 1408, 1536] )
__lowerCAmelCase = torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] )
elif model_name == "videomae-base-short":
__lowerCAmelCase = torch.Size([1, 1408, 1536] )
__lowerCAmelCase = torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] )
# we verified the loss both for normalized and unnormalized targets for this one
__lowerCAmelCase = torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] )
elif model_name == "videomae-large":
__lowerCAmelCase = torch.Size([1, 1408, 1536] )
__lowerCAmelCase = torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] )
elif model_name == "videomae-large-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 400] )
__lowerCAmelCase = torch.tensor([0.07_71, 0.00_11, -0.36_25] )
elif model_name == "videomae-huge-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 400] )
__lowerCAmelCase = torch.tensor([0.24_33, 0.16_32, -0.48_94] )
elif model_name == "videomae-base-short-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 400] )
__lowerCAmelCase = torch.tensor([0.65_88, 0.09_90, -0.24_93] )
elif model_name == "videomae-base-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 400] )
__lowerCAmelCase = torch.tensor([0.36_69, -0.06_88, -0.24_21] )
elif model_name == "videomae-base-short-ssv2":
__lowerCAmelCase = torch.Size([1, 1408, 1536] )
__lowerCAmelCase = torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
__lowerCAmelCase = torch.Size([1, 174] )
__lowerCAmelCase = torch.tensor([-0.05_37, -0.15_39, -0.32_66] )
elif model_name == "videomae-base-ssv2":
__lowerCAmelCase = torch.Size([1, 1408, 1536] )
__lowerCAmelCase = torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] )
elif model_name == "videomae-base-finetuned-ssv2":
__lowerCAmelCase = torch.Size([1, 174] )
__lowerCAmelCase = torch.tensor([0.19_61, -0.83_37, -0.63_89] )
else:
raise ValueError(f'Model name not supported. Should be one of {model_names}' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , lowercase , atol=1e-4 )
else:
print("""Logits:""" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , lowercase , atol=1e-4 )
print("""Logits ok!""" )
# verify loss, if applicable
if model_name == "videomae-base-short":
__lowerCAmelCase = outputs.loss
assert torch.allclose(lowercase , lowercase , atol=1e-4 )
print("""Loss ok!""" )
if pytorch_dump_folder_path is not None:
print(f'Saving model and image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowercase )
model.save_pretrained(lowercase )
if push_to_hub:
print("""Pushing to the hub...""" )
model.push_to_hub(lowercase , organization="""nielsr""" )
if __name__ == "__main__":
_a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_a : int = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
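# Example invocation (a sketch; paths are illustrative and the script filename
# is assumed, since the defaults above point at the original author's machine):
#
#     python convert_videomae_to_pytorch.py \
#         --model_name videomae-base \
#         --pytorch_dump_folder_path ./videomae-base \
#         --push_to_hub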
| 689 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_a : str = {
"""configuration_mobilevit""": ["""MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileViTConfig""", """MobileViTOnnxConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[Any] = ["""MobileViTFeatureExtractor"""]
_a : str = ["""MobileViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Tuple = [
"""MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileViTForImageClassification""",
"""MobileViTForSemanticSegmentation""",
"""MobileViTModel""",
"""MobileViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = [
"""TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileViTForImageClassification""",
"""TFMobileViTForSemanticSegmentation""",
"""TFMobileViTModel""",
"""TFMobileViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
_a : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
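# The _LazyModule indirection above keeps a plain `import` of this package
# cheap: the torch/TF classes listed in _import_structure are only resolved on
# first attribute access, and when a backend is unavailable the except branches
# simply leave those names out of the structure.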
| 689 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_a : Tuple = """\
"""
_a : Tuple = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
_a : Optional[Any] = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
"""input_texts""": datasets.Value("""string""" ),
} ),reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""],)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = 16,__SCREAMING_SNAKE_CASE = True,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
__lowerCAmelCase = """cuda"""
else:
__lowerCAmelCase = """cuda""" if torch.cuda.is_available() else """cpu"""
__lowerCAmelCase = AutoModelForCausalLM.from_pretrained(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = model.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
__lowerCAmelCase = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(__SCREAMING_SNAKE_CASE ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
__lowerCAmelCase = model.config.max_length - 1
else:
__lowerCAmelCase = model.config.max_length
__lowerCAmelCase = tokenizer(
__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE,padding=__SCREAMING_SNAKE_CASE,truncation=__SCREAMING_SNAKE_CASE,max_length=__SCREAMING_SNAKE_CASE,return_tensors="""pt""",return_attention_mask=__SCREAMING_SNAKE_CASE,).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = encodings["""input_ids"""]
__lowerCAmelCase = encodings["""attention_mask"""]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ),1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ),2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
__lowerCAmelCase = []
__lowerCAmelCase = CrossEntropyLoss(reduction="""none""" )
for start_index in logging.tqdm(range(0,len(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE ) ):
__lowerCAmelCase = min(start_index + batch_size,len(__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = encoded_texts[start_index:end_index]
__lowerCAmelCase = attn_masks[start_index:end_index]
if add_start_token:
__lowerCAmelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.cat([bos_tokens_tensor, encoded_batch],dim=1 )
__lowerCAmelCase = torch.cat(
[torch.ones(bos_tokens_tensor.size(),dtype=torch.int64 ).to(__SCREAMING_SNAKE_CASE ), attn_mask],dim=1 )
__lowerCAmelCase = encoded_batch
with torch.no_grad():
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE ).logits
__lowerCAmelCase = out_logits[..., :-1, :].contiguous()
__lowerCAmelCase = labels[..., 1:].contiguous()
__lowerCAmelCase = attn_mask[..., 1:].contiguous()
__lowerCAmelCase = torch.exp(
(loss_fct(shift_logits.transpose(1,2 ),__SCREAMING_SNAKE_CASE ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(__SCREAMING_SNAKE_CASE )}
| 689 | 1 |
'''simple docstring'''
import heapq
import sys
import numpy as np
_a : List[str] = tuple[int, int]
class _UpperCAmelCase :
def __init__( self ):
'''simple docstring'''
__lowerCAmelCase = []
__lowerCAmelCase = set()
def lowerCamelCase__ ( self ):
'''simple docstring'''
if not self.empty():
return self.elements[0][0]
else:
return float("""inf""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
return len(self.elements ) == 0
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if item not in self.set:
heapq.heappush(self.elements,(priority, item) )
self.set.add(__SCREAMING_SNAKE_CASE )
else:
# update
# print("update", item)
__lowerCAmelCase = []
((__lowerCAmelCase) , (__lowerCAmelCase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((__lowerCAmelCase) , (__lowerCAmelCase)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements,(pro, xxx) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if item in self.set:
self.set.remove(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = []
((__lowerCAmelCase) , (__lowerCAmelCase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((__lowerCAmelCase) , (__lowerCAmelCase)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements,(prito, yyy) )
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self.elements[0][1]
def lowerCamelCase__ ( self ):
'''simple docstring'''
((__lowerCAmelCase) , (__lowerCAmelCase)) = heapq.heappop(self.elements )
self.set.remove(__SCREAMING_SNAKE_CASE )
return (priority, item)
def _lowerCAmelCase ( lowercase , lowercase ) -> str:
# euclidean distance
__lowerCAmelCase = np.array(lowercase )
__lowerCAmelCase = np.array(lowercase )
return np.linalg.norm(a - b )
def _lowerCAmelCase ( lowercase , lowercase ) -> Dict:
# integer division by time variable
return consistent_heuristic(lowercase , lowercase ) // t
def _lowerCAmelCase ( lowercase , lowercase ) -> Tuple:
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase ) -> Union[str, Any]:
__lowerCAmelCase = g_function[start] + Wa * heuristics[i](lowercase , lowercase )
return ans
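# Example (with the hyper parameter Wa = 1 defined further below): if
# g_function[s] == 3, then key(s, i, goal, g_function) evaluates to
# 3 + heuristics[i](s, goal); i == 0 selects the consistent euclidean
# heuristic, i >= 1 the inadmissible variants above.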
def _lowerCAmelCase ( lowercase , lowercase , lowercase ) -> Optional[Any]:
__lowerCAmelCase = np.chararray((n, n) )
for i in range(lowercase ):
for j in range(lowercase ):
__lowerCAmelCase = """*"""
for i in range(lowercase ):
for j in range(lowercase ):
if (j, (n - 1) - i) in blocks:
__lowerCAmelCase = """#"""
__lowerCAmelCase = """-"""
__lowerCAmelCase = back_pointer[goal]
while x != start:
((__lowerCAmelCase) , (__lowerCAmelCase)) = x
# print(x)
__lowerCAmelCase = """-"""
__lowerCAmelCase = back_pointer[x]
__lowerCAmelCase = """-"""
for i in range(lowercase ):
for j in range(lowercase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=""" """ )
print("""<-- End position""" , end=""" """ )
else:
print(grid[i][j] , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
print("""PATH TAKEN BY THE ALGORITHM IS:-""" )
__lowerCAmelCase = back_pointer[goal]
while x != start:
print(lowercase , end=""" """ )
__lowerCAmelCase = back_pointer[x]
print(lowercase )
sys.exit()
def _lowerCAmelCase ( lowercase ) -> Optional[Any]:
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Union[str, Any]:
for itera in range(lowercase ):
open_list[itera].remove_element(lowercase )
# print("s", s)
# print("j", j)
((__lowerCAmelCase) , (__lowerCAmelCase)) = s
__lowerCAmelCase = (x - 1, y)
__lowerCAmelCase = (x + 1, y)
__lowerCAmelCase = (x, y + 1)
__lowerCAmelCase = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(lowercase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(lowercase )
__lowerCAmelCase = -1
__lowerCAmelCase = float("""inf""" )
if valid(lowercase ) and g_function[neighbours] > g_function[s] + 1:
__lowerCAmelCase = g_function[s] + 1
__lowerCAmelCase = s
if neighbours not in close_list_anchor:
open_list[0].put(lowercase , key(lowercase , 0 , lowercase , lowercase ) )
if neighbours not in close_list_inad:
for var in range(1 , lowercase ):
if key(lowercase , lowercase , lowercase , lowercase ) <= Wa * key(
lowercase , 0 , lowercase , lowercase ):
open_list[j].put(
lowercase , key(lowercase , lowercase , lowercase , lowercase ) )
def _lowerCAmelCase ( ) -> int:
__lowerCAmelCase = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
_a : int = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
_a : Optional[int] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(1_0, 1),
(1_1, 1),
(1_2, 1),
(1_3, 1),
(1_4, 1),
(1_5, 1),
(1_6, 1),
(1_7, 1),
(1_8, 1),
(1_9, 1),
]
_a : List[str] = make_common_ground()
_a : int = blocks_blk
# hyper parameters
_a : int = 1
_a : str = 1
_a : Optional[Any] = 2_0
_a : Union[str, Any] = 3 # one consistent and two other inconsistent
# start and end destination
_a : Optional[Any] = (0, 0)
_a : Tuple = (n - 1, n - 1)
_a : List[Any] = 1
def _lowerCAmelCase ( lowercase , lowercase , lowercase ) -> Union[str, Any]:
__lowerCAmelCase = {start: 0, goal: float("""inf""" )}
__lowerCAmelCase = {start: -1, goal: -1}
__lowerCAmelCase = []
__lowerCAmelCase = set()
for i in range(lowercase ):
open_list.append(PriorityQueue() )
open_list[i].put(lowercase , key(lowercase , lowercase , lowercase , lowercase ) )
__lowerCAmelCase = []
__lowerCAmelCase = []
while open_list[0].minkey() < float("""inf""" ):
for i in range(1 , lowercase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("""inf""" ):
do_something(lowercase , lowercase , lowercase )
else:
__lowerCAmelCase , __lowerCAmelCase = open_list[i].top_show()
visited.add(lowercase )
expand_state(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , )
close_list_inad.append(lowercase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("""inf""" ):
do_something(lowercase , lowercase , lowercase )
else:
__lowerCAmelCase = open_list[0].top_show()
visited.add(lowercase )
expand_state(
lowercase , 0 , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , )
close_list_anchor.append(lowercase )
print("""No path found to goal""" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(lowercase ):
if (j, i) in blocks:
print("""#""" , end=""" """ )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("""*""" , end=""" """ )
else:
print("""-""" , end=""" """ )
else:
print("""*""" , end=""" """ )
if (j, i) == (n - 1, n - 1):
print("""<-- End position""" , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 689 |
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : Union[str, Any] =["""image_processor"""]
a : Dict ="""SamImageProcessor"""
def __init__( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.image_processor
__lowerCAmelCase = -10
__lowerCAmelCase = self.image_processor.size["""longest_edge"""]
def __call__( self,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE = None,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = self.image_processor(
__SCREAMING_SNAKE_CASE,return_tensors=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE,)
# pop arguments that are not used in the forward pass but are used nevertheless
__lowerCAmelCase = encoding_image_processor["""original_sizes"""]
if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ): # Checks if Torch or TF tensor
__lowerCAmelCase = original_sizes.numpy()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self._check_and_preprocess_points(
input_points=__SCREAMING_SNAKE_CASE,input_labels=__SCREAMING_SNAKE_CASE,input_boxes=__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = self._normalize_and_convert(
__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,input_points=__SCREAMING_SNAKE_CASE,input_labels=__SCREAMING_SNAKE_CASE,input_boxes=__SCREAMING_SNAKE_CASE,return_tensors=__SCREAMING_SNAKE_CASE,)
return encoding_image_processor
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="pt",):
'''simple docstring'''
if input_points is not None:
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,original_sizes[0] ) for point in input_points
]
else:
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
for point, original_size in zip(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
__lowerCAmelCase , __lowerCAmelCase = self._pad_points_and_labels(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )
if input_labels is not None:
__lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )
if input_boxes is not None:
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,original_sizes[0],is_bounding_box=__SCREAMING_SNAKE_CASE )
for box in input_boxes
]
else:
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,is_bounding_box=__SCREAMING_SNAKE_CASE )
for box, original_size in zip(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
]
__lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )
if input_boxes is not None:
if return_tensors == "pt":
__lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE )
# boxes batch size of 1 by default
__lowerCAmelCase = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
__lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
# boxes batch size of 1 by default
__lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"""input_boxes""": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
__lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
__lowerCAmelCase = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
__lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
__lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"""input_points""": input_points} )
if input_labels is not None:
if return_tensors == "pt":
__lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
__lowerCAmelCase = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
__lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
__lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"""input_labels""": input_labels} )
return encoding_image_processor
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = max([point.shape[0] for point in input_points] )
__lowerCAmelCase = []
for i, point in enumerate(__SCREAMING_SNAKE_CASE ):
if point.shape[0] != expected_nb_points:
__lowerCAmelCase = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value],axis=0 )
__lowerCAmelCase = np.append(input_labels[i],[self.point_pad_value] )
processed_input_points.append(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = processed_input_points
return input_points, input_labels
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = original_size
__lowerCAmelCase , __lowerCAmelCase = self.image_processor._get_preprocess_shape(__SCREAMING_SNAKE_CASE,longest_edge=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = deepcopy(__SCREAMING_SNAKE_CASE ).astype(__SCREAMING_SNAKE_CASE )
if is_bounding_box:
__lowerCAmelCase = coords.reshape(-1,2,2 )
__lowerCAmelCase = coords[..., 0] * (new_w / old_w)
__lowerCAmelCase = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
__lowerCAmelCase = coords.reshape(-1,4 )
return coords
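# Illustrative numbers (assuming self.target_size == 1024): a point (100, 200)
# in a 512 x 512 image gives (new_h, new_w) == (1024, 1024), so the point is
# rescaled by a factor of 2 on both axes to (200.0, 400.0) in the model's
# longest-edge coordinate frame.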
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,):
'''simple docstring'''
if input_points is not None:
if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ): # Checks for TF or Torch tensor
__lowerCAmelCase = input_points.numpy().tolist()
if not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) or not isinstance(input_points[0],__SCREAMING_SNAKE_CASE ):
raise ValueError("""Input points must be a list of list of floating points.""" )
__lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ) for input_point in input_points]
else:
__lowerCAmelCase = None
if input_labels is not None:
if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ):
__lowerCAmelCase = input_labels.numpy().tolist()
if not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) or not isinstance(input_labels[0],__SCREAMING_SNAKE_CASE ):
raise ValueError("""Input labels must be a list of list integers.""" )
__lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ) for label in input_labels]
else:
__lowerCAmelCase = None
if input_boxes is not None:
if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ):
__lowerCAmelCase = input_boxes.numpy().tolist()
if (
not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
or not isinstance(input_boxes[0],__SCREAMING_SNAKE_CASE )
or not isinstance(input_boxes[0][0],__SCREAMING_SNAKE_CASE )
):
raise ValueError("""Input boxes must be a list of list of list of floating points.""" )
__lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ).astype(np.float32 ) for box in input_boxes]
else:
__lowerCAmelCase = None
return input_points, input_labels, input_boxes
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(__SCREAMING_SNAKE_CASE ) )
def lowerCamelCase__ ( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.image_processor.post_process_masks(*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
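# Typical usage sketch (the checkpoint name below is the public SAM release,
# assumed here only for illustration):
#
#     processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
#     inputs = processor(raw_image, input_points=[[[450, 600]]], return_tensors="pt")
#
# which returns the pixel values together with the normalized "input_points"
# prepared by _normalize_and_convert above.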
| 689 | 1 |
'''simple docstring'''
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
_a : int = 4
_a : Dict = 3
class _UpperCAmelCase ( lowerCAmelCase_ ):
pass
def _lowerCAmelCase ( lowercase ) -> List[Any]:
for shard in shards:
for i in range(lowercase ):
yield {"i": i, "shard": shard}
def _lowerCAmelCase ( ) -> Optional[Any]:
__lowerCAmelCase = int(os.environ["""RANK"""] )
__lowerCAmelCase = int(os.environ["""WORLD_SIZE"""] )
__lowerCAmelCase = ArgumentParser()
parser.add_argument("""--streaming""" , type=lowercase )
parser.add_argument("""--local_rank""" , type=lowercase )
parser.add_argument("""--num_workers""" , type=lowercase , default=0 )
__lowerCAmelCase = parser.parse_args()
__lowerCAmelCase = args.streaming
__lowerCAmelCase = args.num_workers
__lowerCAmelCase = {"""shards""": [f'shard_{shard_idx}' for shard_idx in range(lowercase )]}
__lowerCAmelCase = IterableDataset.from_generator(lowercase , gen_kwargs=lowercase )
if not streaming:
__lowerCAmelCase = Dataset.from_list(list(lowercase ) )
__lowerCAmelCase = split_dataset_by_node(lowercase , rank=lowercase , world_size=lowercase )
__lowerCAmelCase = torch.utils.data.DataLoader(lowercase , num_workers=lowercase )
__lowerCAmelCase = NUM_SHARDS * NUM_ITEMS_PER_SHARD
__lowerCAmelCase = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
__lowerCAmelCase = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(f'local_size {local_size} != expected_local_size {expected_local_size}' )
if __name__ == "__main__":
main()
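# Launched under torch.distributed, e.g. (illustrative):
#
#     torchrun --nproc_per_node=2 this_script.py --streaming True
#
# Each rank then verifies it received full_size // world_size examples, plus
# one extra on the first full_size % world_size ranks.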
| 689 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
_a : int = logging.getLogger(__name__)
@dataclass
class _UpperCAmelCase :
a : Optional[str] =field(
default="""tab_fact""" , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
a : Optional[str] =field(
default="""tab_fact""" , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} , )
a : int =field(
default=10_24 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a : bool =field(
default=lowerCAmelCase_ , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
a : bool =field(
default=lowerCAmelCase_ , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
a : Optional[int] =field(
default=lowerCAmelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
a : Optional[int] =field(
default=lowerCAmelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
a : Optional[int] =field(
default=lowerCAmelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of prediction examples to this """
"""value if set."""
)
} , )
a : Optional[str] =field(
default=lowerCAmelCase_ , metadata={"""help""": """A csv or a json file containing the training data."""} )
a : Optional[str] =field(
default=lowerCAmelCase_ , metadata={"""help""": """A csv or a json file containing the validation data."""} )
a : Optional[str] =field(default=lowerCAmelCase_ , metadata={"""help""": """A csv or a json file containing the test data."""} )
def lowerCamelCase__ ( self ):
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" )
else:
__lowerCAmelCase = self.train_file.split(""".""" )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
__lowerCAmelCase = self.validation_file.split(""".""" )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class _UpperCAmelCase :
a : str =field(
default=lowerCAmelCase_ , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
a : Optional[str] =field(
default=lowerCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
a : Optional[str] =field(
default=lowerCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
a : Optional[str] =field(
default=lowerCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
a : bool =field(
default=lowerCAmelCase_ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
a : str =field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
a : bool =field(
default=lowerCAmelCase_ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
def _lowerCAmelCase ( ) -> Optional[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
__lowerCAmelCase = training_args.get_process_log_level()
logger.setLevel(lowercase )
datasets.utils.logging.set_verbosity(lowercase )
transformers.utils.logging.set_verbosity(lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
__lowerCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowerCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `statement` column for the input statement and the `table_text` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__lowerCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
__lowerCAmelCase = {"""train""": data_args.train_file, """validation""": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
__lowerCAmelCase = data_args.train_file.split(""".""" )[-1]
__lowerCAmelCase = data_args.test_file.split(""".""" )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
__lowerCAmelCase = data_args.test_file
else:
raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" )
for key in data_files.keys():
logger.info(f'load a local file for {key}: {data_files[key]}' )
if data_args.train_file.endswith(""".csv""" ):
# Loading a dataset from local csv files
__lowerCAmelCase = load_dataset("""csv""" , data_files=lowercase , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
__lowerCAmelCase = load_dataset("""json""" , data_files=lowercase , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
__lowerCAmelCase = raw_datasets["""train"""].features["""label"""].names
__lowerCAmelCase = len(lowercase )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowerCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
__lowerCAmelCase = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowercase , )
__lowerCAmelCase = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
__lowerCAmelCase = """max_length"""
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
__lowerCAmelCase = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
__lowerCAmelCase = {"""Refused""": 0, """Entailed""": 1}
__lowerCAmelCase = {0: """Refused""", 1: """Entailed"""}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
__lowerCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(lowercase ):
# Tokenize the texts
def _convert_table_text_to_pandas(lowercase ):
__lowerCAmelCase = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )]
__lowerCAmelCase = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
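# Example of the flattening above: a table_text of "col1#col2\na#b\nc#d"
# becomes a DataFrame with columns ["col1", "col2"] and rows ("a", "b") and
# ("c", "d"), which the TAPEX tokenizer then linearizes together with the
# statement.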
__lowerCAmelCase = examples["""statement"""]
__lowerCAmelCase = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) )
__lowerCAmelCase = tokenizer(lowercase , lowercase , padding=lowercase , max_length=lowercase , truncation=lowercase )
__lowerCAmelCase = examples["""label"""]
return result
with training_args.main_process_first(desc="""dataset map pre-processing""" ):
__lowerCAmelCase = raw_datasets.map(
lowercase , batched=lowercase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
__lowerCAmelCase = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
__lowerCAmelCase = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
__lowerCAmelCase = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
__lowerCAmelCase = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("""--do_predict requires a test dataset""" )
__lowerCAmelCase = raw_datasets["""test"""]
if data_args.max_predict_samples is not None:
__lowerCAmelCase = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowercase ) ) , 3 ):
logger.info(f'Sample {index} of the training set: {train_dataset[index]}.' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary mapping strings to floats.
def compute_metrics(lowercase ):
__lowerCAmelCase = p.predictions[0] if isinstance(p.predictions , lowercase ) else p.predictions
__lowerCAmelCase = np.argmax(lowercase , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
__lowerCAmelCase = default_data_collator
elif training_args.fp16:
__lowerCAmelCase = DataCollatorWithPadding(lowercase , pad_to_multiple_of=8 )
else:
__lowerCAmelCase = None
# Initialize our Trainer
__lowerCAmelCase = Trainer(
model=lowercase , args=lowercase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowercase , tokenizer=lowercase , data_collator=lowercase , )
# Training
if training_args.do_train:
__lowerCAmelCase = None
if training_args.resume_from_checkpoint is not None:
__lowerCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__lowerCAmelCase = last_checkpoint
__lowerCAmelCase = trainer.train(resume_from_checkpoint=lowercase )
__lowerCAmelCase = train_result.metrics
__lowerCAmelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase )
)
__lowerCAmelCase = min(lowercase , len(lowercase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("""train""" , lowercase )
trainer.save_metrics("""train""" , lowercase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__lowerCAmelCase = trainer.evaluate(eval_dataset=lowercase )
__lowerCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowercase )
__lowerCAmelCase = min(lowercase , len(lowercase ) )
trainer.log_metrics("""eval""" , lowercase )
trainer.save_metrics("""eval""" , lowercase )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
# Removing the `label` column because it contains -1 and the Trainer won't like that.
__lowerCAmelCase = predict_dataset.remove_columns("""label""" )
__lowerCAmelCase = trainer.predict(lowercase , metric_key_prefix="""predict""" ).predictions
__lowerCAmelCase = np.argmax(lowercase , axis=1 )
__lowerCAmelCase = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" )
if trainer.is_world_process_zero():
with open(lowercase , """w""" ) as writer:
logger.info("""***** Predict Results *****""" )
writer.write("""index\tprediction\n""" )
for index, item in enumerate(lowercase ):
__lowerCAmelCase = label_list[item]
writer.write(f'{index}\t{item}\n' )
__lowerCAmelCase = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase )
else:
trainer.create_model_card(**lowercase )
def _lowerCAmelCase ( lowercase ) -> str:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 689 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_a : List[Any] = {"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
_a : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 689 |
'''simple docstring'''
import os
import sys
import unittest
_a : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
_a : Union[str, Any] = os.path.join(git_repo_path, """src""", """diffusers""")
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = find_backend(""" if not is_torch_available():""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch""" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
__lowerCAmelCase = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch_and_transformers""" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
__lowerCAmelCase = find_backend(
""" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch_and_transformers_and_onnx""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = read_init()
# We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn("""torch""",__SCREAMING_SNAKE_CASE )
self.assertIn("""torch_and_transformers""",__SCREAMING_SNAKE_CASE )
self.assertIn("""flax_and_transformers""",__SCREAMING_SNAKE_CASE )
self.assertIn("""torch_and_transformers_and_onnx""",__SCREAMING_SNAKE_CASE )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""UNet2DModel""",objects["""torch"""] )
self.assertIn("""FlaxUNet2DConditionModel""",objects["""flax"""] )
self.assertIn("""StableDiffusionPipeline""",objects["""torch_and_transformers"""] )
self.assertIn("""FlaxStableDiffusionPipeline""",objects["""flax_and_transformers"""] )
self.assertIn("""LMSDiscreteScheduler""",objects["""torch_and_scipy"""] )
self.assertIn("""OnnxStableDiffusionPipeline""",objects["""torch_and_transformers_and_onnx"""] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = create_dummy_object("""CONSTANT""","""'torch'""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""\nCONSTANT = None\n""" )
__lowerCAmelCase = create_dummy_object("""function""","""'torch'""" )
self.assertEqual(
__SCREAMING_SNAKE_CASE,"""\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" )
__lowerCAmelCase = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, 'torch')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, 'torch')
"""
__lowerCAmelCase = create_dummy_object("""FakeClass""","""'torch'""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
"""
__lowerCAmelCase = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
self.assertEqual(dummy_files["""torch"""],__SCREAMING_SNAKE_CASE )
| 689 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_a : Tuple = {"""configuration_unispeech""": ["""UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP""", """UniSpeechConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[int] = [
"""UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""UniSpeechForCTC""",
"""UniSpeechForPreTraining""",
"""UniSpeechForSequenceClassification""",
"""UniSpeechModel""",
"""UniSpeechPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
_a : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 689 |
'''simple docstring'''
def _lowerCAmelCase ( lowercase ) -> tuple[int, int]:
try:
__lowerCAmelCase = float(lowercase )
except ValueError:
raise ValueError("""Please enter a valid number""" )
__lowerCAmelCase = decimal - int(lowercase )
if fractional_part == 0:
return int(lowercase ), 1
else:
__lowerCAmelCase = len(str(lowercase ).split(""".""" )[1] )
__lowerCAmelCase = int(decimal * (10**number_of_frac_digits) )
__lowerCAmelCase = 10**number_of_frac_digits
__lowerCAmelCase , __lowerCAmelCase = denominator, numerator
while True:
__lowerCAmelCase = dividend % divisor
if remainder == 0:
break
__lowerCAmelCase , __lowerCAmelCase = divisor, remainder
__lowerCAmelCase , __lowerCAmelCase = numerator / divisor, denominator / divisor
return int(lowercase ), int(lowercase )
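# Worked example: 0.25 has two fractional digits, so the reduction starts from
# 25 / 100; the Euclidean loop finds gcd(25, 100) == 25 and the function
# returns (1, 4).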
if __name__ == "__main__":
print(f'{decimal_to_fraction(2) = }')
print(f'{decimal_to_fraction(89.0) = }')
print(f'{decimal_to_fraction("67") = }')
print(f'{decimal_to_fraction("45.0") = }')
print(f'{decimal_to_fraction(1.5) = }')
print(f'{decimal_to_fraction("6.25") = }')
print(f'{decimal_to_fraction("78td") = }')
| 689 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a : Union[str, Any] = {
"""configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
"""feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
"""processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[Any] = [
"""MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MCTCTForCTC""",
"""MCTCTModel""",
"""MCTCTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 689 |
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"""
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals)
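# After the two builder calls, the generated message classes live in this module's globals;
# a quick usage sketch (assumption: illustrative, file path hypothetical):
#   model = _globals["ModelProto"]()
#   model.ParseFromString(open("spiece.model", "rb").read())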
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
| 689 | 1 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_lock_timeout(tmpdir):
    lock_a = FileLock(str(tmpdir / "foo.lock"))
    lock_b = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock_a.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock_b.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_filename(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock_a = FileLock(str(tmpdir / filename))
    assert lock_a._lock_file.endswith(".lock")
    assert not lock_a._lock_file.endswith(filename)
    assert len(os.path.basename(lock_a._lock_file)) <= 255
    lock_b = FileLock(tmpdir / filename)
    with lock_a.acquire():
        with pytest.raises(Timeout):
            lock_b.acquire(0)
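# Minimal usage sketch of the pattern these tests exercise (assumption: illustrative paths):
#   from datasets.utils.filelock import FileLock, Timeout
#   lock = FileLock("shared_resource.lock")
#   try:
#       with lock.acquire(timeout=1):
#           ...  # critical section; a second process blocks or raises Timeout here
#   except Timeout:
#       print("another process holds the lock")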
| 689 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    sample: torch.FloatTensor


class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class VectorQuantizer(nn.Module):
    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)
    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator=None) -> torch.FloatTensor:
        # make sure sample is on the same device and dtype as the parameters
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            # closed-form KL divergence between diagonal Gaussians; with other=None this is
            # KL(N(mean, var) || N(0, I)) = 0.5 * sum(mean^2 + var - 1 - log var)
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
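# A self-contained sketch (assumption: toy shapes, not the diffusers implementation) of the two
# ideas VectorQuantizer.forward combines: nearest-codebook lookup and the straight-through
# gradient estimator, which lets gradients reach the encoder through the non-differentiable argmin.
#
# import torch
#
# codebook = torch.randn(16, 4)                       # n_e codewords of dimension vq_embed_dim
# z = torch.randn(8, 4, requires_grad=True)           # flattened encoder outputs
# indices = torch.cdist(z, codebook).argmin(dim=1)    # nearest codeword per vector
# z_q = codebook[indices]
# z_q = z + (z_q - z).detach()                        # forward: quantized; backward: identity to z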
| 689 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_transfo_xl""": ["""TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TransfoXLConfig"""],
"""tokenization_transfo_xl""": ["""TransfoXLCorpus""", """TransfoXLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
"""TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AdaptiveEmbedding""",
"""TransfoXLForSequenceClassification""",
"""TransfoXLLMHeadModel""",
"""TransfoXLModel""",
"""TransfoXLPreTrainedModel""",
"""load_tf_weights_in_transfo_xl""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
"""TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAdaptiveEmbedding""",
"""TFTransfoXLForSequenceClassification""",
"""TFTransfoXLLMHeadModel""",
"""TFTransfoXLMainLayer""",
"""TFTransfoXLModel""",
"""TFTransfoXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 689 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
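# Hypothetical usage sketch (names and paths assumed, not part of this file):
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   data_args = SquadDataTrainingArguments(data_dir="./squad", model_type="bert")
#   train_dataset = SquadDataset(data_args, tokenizer, mode=Split.train)
#   inputs = train_dataset[0]  # dict of input_ids / attention_mask / position tensors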
| 689 | 1 |
'''simple docstring'''
from __future__ import annotations
_a : Tuple = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(grid, init, goal, cost, heuristic) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # estimated total cost: cost so far plus heuristic to the goal
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
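# Note: cell.sort(); cell.reverse(); cell.pop() always expands the open node with the
# smallest f = g + heuristic, so with an admissible heuristic this behaves like A* on a
# uniform-cost grid; with an all-zero heuristic it degenerates to Dijkstra's algorithm.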
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
print("""ACTION MAP""")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 689 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
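# A toy illustration (assumed shapes) of the fused-QKV split performed above: one
# (3*dim, dim) projection matrix is sliced into separate query/key/value matrices of
# shape (dim, dim), matching the val[:dim], val[dim : dim * 2], val[-dim:] slices.
#
# import torch
# dim = 4
# fused = torch.randn(3 * dim, dim)
# q, k, v = fused[:dim, :], fused[dim : 2 * dim, :], fused[-dim:, :]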
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 689 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
    args = parser.parse_args()
if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
else:
raise ValueError("""args.model_type should be \"bert\".""")
    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]
print(f'N layers selected for distillation: {std_idx}')
print(f'Number of params transferred for distillation: {len(compressed_sd.keys())}')
print(f'Save transferred checkpoint to {args.dump_checkpoint}.')
torch.save(compressed_sd, args.dump_checkpoint)
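# Example invocation (hypothetical script name, mirroring the argparse defaults above):
#   python extract_distilbert.py --model_type bert --model_name bert-base-uncased \
#       --dump_checkpoint serialization_dir/tf_bert-base-uncased_0247911.pth --vocab_transform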
| 689 |
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict, hidden_size) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
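# Example invocation (hypothetical script name and output path):
#   python convert_musicgen_transformers.py --checkpoint small --pytorch_dump_folder ./musicgen-small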
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 689 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""albert-base-v1""": 5_1_2,
"""albert-large-v1""": 5_1_2,
"""albert-xlarge-v1""": 5_1_2,
"""albert-xxlarge-v1""": 5_1_2,
"""albert-base-v2""": 5_1_2,
"""albert-large-v2""": 5_1_2,
"""albert-xlarge-v2""": 5_1_2,
"""albert-xxlarge-v2""": 5_1_2,
}
_a : Dict = """▁"""
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
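# Minimal usage sketch (assumption: checkpoint available on the Hub):
#   tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
#   tokenizer("Hello world")["input_ids"]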
| 689 |
'''simple docstring'''
from collections import deque
def tarjan(g) -> list[list[int]]:
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n, edges) -> list[list[int]]:
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
# Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
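    # Complexity note: tarjan() visits each vertex and edge exactly once, so it runs in
    # O(V + E). For this test graph the components are {5}, {6}, {4} and the cycle {3, 2, 1, 0}.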
| 689 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCAmelCase ( unittest.TestCase ):
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDModel(
block_out_channels=(32, 64),layers_per_block=2,sample_size=32,in_channels=3,out_channels=3,down_block_types=("""DownBlock2D""", """AttnDownBlock2D"""),up_block_types=("""AttnUpBlock2D""", """UpBlock2D"""),)
return model
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.dummy_uncond_unet
__lowerCAmelCase = ScoreSdeVeScheduler()
__lowerCAmelCase = ScoreSdeVePipeline(unet=__SCREAMING_SNAKE_CASE,scheduler=__SCREAMING_SNAKE_CASE )
sde_ve.to(__SCREAMING_SNAKE_CASE )
sde_ve.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = sde_ve(num_inference_steps=2,output_type="""numpy""",generator=__SCREAMING_SNAKE_CASE ).images
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = sde_ve(num_inference_steps=2,output_type="""numpy""",generator=__SCREAMING_SNAKE_CASE,return_dict=__SCREAMING_SNAKE_CASE )[
0
]
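        # Compare a 3x3 corner patch of the last channel between the dict and
        # tuple outputs and against the hard-coded reference values below.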
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowerCAmelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """google/ncsnpp-church-256"""
__lowerCAmelCase = UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = ScoreSdeVeScheduler.from_pretrained(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = ScoreSdeVePipeline(unet=__SCREAMING_SNAKE_CASE,scheduler=__SCREAMING_SNAKE_CASE )
sde_ve.to(__SCREAMING_SNAKE_CASE )
sde_ve.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = sde_ve(num_inference_steps=10,output_type="""numpy""",generator=__SCREAMING_SNAKE_CASE ).images
__lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
__lowerCAmelCase = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 689 |
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def _lowerCAmelCase ( ) -> Union[str, Any]:
__lowerCAmelCase = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
__lowerCAmelCase = parser.add_subparsers(help="""diffusers-cli command helpers""" )
# Register commands
EnvironmentCommand.register_subcommand(lowercase )
# Let's go
__lowerCAmelCase = parser.parse_args()
if not hasattr(lowercase , """func""" ):
parser.print_help()
exit(1 )
# Run
__lowerCAmelCase = args.func(lowercase )
service.run()
if __name__ == "__main__":
main()
| 689 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( lowercase , lowercase ) -> int:
__lowerCAmelCase = 1 # To kept the Calculated Value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
__lowerCAmelCase = n - k
# Calculate C(n,k)
for i in range(lowercase ):
result *= n - i
result //= i + 1
return result
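# The n-th Catalan number, C(2n, n) / (n + 1), counts the binary search trees
# on n distinct keys; multiplying by n! counts the distinct binary trees.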
def _lowerCAmelCase ( lowercase ) -> int:
return binomial_coefficient(2 * node_count , lowercase ) // (node_count + 1)
def _lowerCAmelCase ( lowercase ) -> int:
if n < 0:
raise ValueError("""factorial() not defined for negative values""" )
__lowerCAmelCase = 1
for i in range(1 , n + 1 ):
result *= i
return result
def _lowerCAmelCase ( lowercase ) -> int:
return catalan_number(lowercase ) * factorial(lowercase )
if __name__ == "__main__":
_a : List[Any] = int(input("""Enter the number of nodes: """).strip() or 0)
if node_count <= 0:
raise ValueError("""We need some nodes to work with.""")
print(
f'Given {node_count} nodes, there are {binary_tree_count(node_count)} '
f'binary trees and {catalan_number(node_count)} binary search trees.'
)
| 689 |
'''simple docstring'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_a : List[Any] = logging.get_logger(__name__)
_a : int = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
_a : Any = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> str:
for attribute in key.split(""".""" ):
__lowerCAmelCase = getattr(lowercase , lowercase )
if weight_type is not None:
__lowerCAmelCase = getattr(lowercase , lowercase ).shape
else:
__lowerCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
__lowerCAmelCase = value
elif weight_type == "weight_g":
__lowerCAmelCase = value
elif weight_type == "weight_v":
__lowerCAmelCase = value
elif weight_type == "bias":
__lowerCAmelCase = value
else:
__lowerCAmelCase = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def _lowerCAmelCase ( lowercase , lowercase ) -> List[Any]:
__lowerCAmelCase = []
__lowerCAmelCase = fairseq_model.state_dict()
__lowerCAmelCase = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
__lowerCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == """group""" , )
__lowerCAmelCase = True
else:
for key, mapped_key in MAPPING.items():
__lowerCAmelCase = """unispeech_sat.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split(""".""" )[:-1] ) != key):
# special case since naming is very similar
continue
__lowerCAmelCase = True
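                # "*" in the mapped key is a wildcard for the encoder layer
                # index, which is recovered from the fairseq parameter name.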
if "*" in mapped_key:
__lowerCAmelCase = name.split(lowercase )[0].split(""".""" )[-2]
__lowerCAmelCase = mapped_key.replace("""*""" , lowercase )
if "weight_g" in name:
__lowerCAmelCase = """weight_g"""
elif "weight_v" in name:
__lowerCAmelCase = """weight_v"""
elif "bias" in name:
__lowerCAmelCase = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCAmelCase = """weight"""
else:
__lowerCAmelCase = None
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(f'Unused weights: {unused_weights}' )
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]:
__lowerCAmelCase = full_name.split("""conv_layers.""" )[-1]
__lowerCAmelCase = name.split(""".""" )
__lowerCAmelCase = int(items[0] )
__lowerCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
__lowerCAmelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
__lowerCAmelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
__lowerCAmelCase = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
__lowerCAmelCase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(lowercase )
@torch.no_grad()
def _lowerCAmelCase ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> Dict:
if config_path is not None:
__lowerCAmelCase = UniSpeechSatConfig.from_pretrained(lowercase )
else:
__lowerCAmelCase = UniSpeechSatConfig()
__lowerCAmelCase = """"""
if is_finetuned:
__lowerCAmelCase = UniSpeechSatForCTC(lowercase )
else:
__lowerCAmelCase = UniSpeechSatForPreTraining(lowercase )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
__lowerCAmelCase = model[0].eval()
recursively_load_weights(lowercase , lowercase )
hf_wavavec.save_pretrained(lowercase )
if __name__ == "__main__":
_a : List[str] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
_a : Union[str, Any] = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 689 | 1 |
'''simple docstring'''
_a : List[Any] = 6_5_5_2_1
def _lowerCAmelCase ( lowercase ) -> int:
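    # Adler-32: maintain two running sums modulo 65521 (the largest prime
    # below 2**16) and pack them into one 32-bit value as (b << 16) | a.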
__lowerCAmelCase = 1
__lowerCAmelCase = 0
for plain_chr in plain_text:
__lowerCAmelCase = (a + ord(lowercase )) % MOD_ADLER
__lowerCAmelCase = (b + a) % MOD_ADLER
return (b << 16) | a
| 689 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
_a : str = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
_a : Dict = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
_a : List[str] = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ),reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""],)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
__lowerCAmelCase = spearmanr(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 689 | 1 |
'''simple docstring'''
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def _lowerCAmelCase ( lowercase = True , *lowercase , **lowercase ) -> List[str]:
if not is_tqdm_available():
raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" )
__lowerCAmelCase = False
if main_process_only:
__lowerCAmelCase = PartialState().local_process_index == 0
return _tqdm(*lowercase , **lowercase , disable=lowercase )
| 689 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase ( metaclass=lowerCAmelCase_ ):
a : List[str] =["""onnx"""]
def __init__( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(self,["""onnx"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""onnx"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""onnx"""] )
| 689 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( lowercase ) -> list:
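    # Build the first n harmonic terms as strings: ["1", "1/2", ..., "1/n"].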
if n_term == "":
return []
__lowerCAmelCase = []
for temp in range(int(lowercase ) ):
series.append(f'1/{temp + 1}' if series else """1""" )
return series
if __name__ == "__main__":
_a : List[str] = input("""Enter the last number (nth term) of the Harmonic Series""")
print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""")
print(harmonic_series(nth_term))
| 689 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_a : int = logging.get_logger(__name__)
_a : Optional[int] = {
"""EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : List[str] ="""gptj"""
a : Optional[int] ={
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self,__SCREAMING_SNAKE_CASE=5_04_00,__SCREAMING_SNAKE_CASE=20_48,__SCREAMING_SNAKE_CASE=40_96,__SCREAMING_SNAKE_CASE=28,__SCREAMING_SNAKE_CASE=16,__SCREAMING_SNAKE_CASE=64,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="gelu_new",__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=1e-5,__SCREAMING_SNAKE_CASE=0.02,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=5_02_56,__SCREAMING_SNAKE_CASE=5_02_56,__SCREAMING_SNAKE_CASE=False,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = vocab_size
__lowerCAmelCase = n_positions
__lowerCAmelCase = n_embd
__lowerCAmelCase = n_layer
__lowerCAmelCase = n_head
__lowerCAmelCase = n_inner
__lowerCAmelCase = rotary_dim
__lowerCAmelCase = activation_function
__lowerCAmelCase = resid_pdrop
__lowerCAmelCase = embd_pdrop
__lowerCAmelCase = attn_pdrop
__lowerCAmelCase = layer_norm_epsilon
__lowerCAmelCase = initializer_range
__lowerCAmelCase = use_cache
__lowerCAmelCase = bos_token_id
__lowerCAmelCase = eos_token_id
super().__init__(
bos_token_id=__SCREAMING_SNAKE_CASE,eos_token_id=__SCREAMING_SNAKE_CASE,tie_word_embeddings=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
class _UpperCAmelCase ( lowerCAmelCase_ ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = "default",__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = False,):
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE,task=__SCREAMING_SNAKE_CASE,patching_specs=__SCREAMING_SNAKE_CASE,use_past=__SCREAMING_SNAKE_CASE )
if not getattr(self._config,"""pad_token_id""",__SCREAMING_SNAKE_CASE ):
# TODO: how to do that better?
__lowerCAmelCase = 0
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE,direction="""inputs""" )
__lowerCAmelCase = {0: """batch""", 1: """past_sequence + sequence"""}
else:
__lowerCAmelCase = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self._config.n_layer
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self._config.n_head
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = -1,__SCREAMING_SNAKE_CASE = -1,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = None,):
'''simple docstring'''
__lowerCAmelCase = super(__SCREAMING_SNAKE_CASE,self ).generate_dummy_inputs(
__SCREAMING_SNAKE_CASE,batch_size=__SCREAMING_SNAKE_CASE,seq_length=__SCREAMING_SNAKE_CASE,is_pair=__SCREAMING_SNAKE_CASE,framework=__SCREAMING_SNAKE_CASE )
# We need to order the input in the way they appears in the forward()
__lowerCAmelCase = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__lowerCAmelCase , __lowerCAmelCase = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__lowerCAmelCase = seqlen + 2
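            # Each past key/value tensor has shape
            # (batch, num_attention_heads, past_sequence_length, head_dim).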
__lowerCAmelCase = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__lowerCAmelCase = [
(torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers )
]
__lowerCAmelCase = common_inputs["""attention_mask"""]
if self.use_past:
__lowerCAmelCase = ordered_inputs["""attention_mask"""].dtype
__lowerCAmelCase = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,dtype=__SCREAMING_SNAKE_CASE )],dim=1 )
return ordered_inputs
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return 13
| 689 | 1 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_a : Tuple = logging.get_logger(__name__)
_a : List[Any] = {
"""vocab_file""": """vocab.txt""",
"""merges_file""": """bpe.codes""",
}
_a : List[str] = {
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
_a : int = {
"""vinai/phobert-base""": 2_5_6,
"""vinai/phobert-large""": 2_5_6,
}
def _lowerCAmelCase ( lowercase ) -> List[Any]:
__lowerCAmelCase = set()
__lowerCAmelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__lowerCAmelCase = char
__lowerCAmelCase = set(lowercase )
return pairs
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : Optional[int] =VOCAB_FILES_NAMES
a : Optional[int] =PRETRAINED_VOCAB_FILES_MAP
a : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE="<s>",__SCREAMING_SNAKE_CASE="</s>",__SCREAMING_SNAKE_CASE="</s>",__SCREAMING_SNAKE_CASE="<s>",__SCREAMING_SNAKE_CASE="<unk>",__SCREAMING_SNAKE_CASE="<pad>",__SCREAMING_SNAKE_CASE="<mask>",**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE,eos_token=__SCREAMING_SNAKE_CASE,unk_token=__SCREAMING_SNAKE_CASE,sep_token=__SCREAMING_SNAKE_CASE,cls_token=__SCREAMING_SNAKE_CASE,pad_token=__SCREAMING_SNAKE_CASE,mask_token=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = vocab_file
__lowerCAmelCase = merges_file
__lowerCAmelCase = {}
__lowerCAmelCase = 0
__lowerCAmelCase = 1
__lowerCAmelCase = 2
__lowerCAmelCase = 3
self.add_from_file(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {v: k for k, v in self.encoder.items()}
with open(__SCREAMING_SNAKE_CASE,encoding="""utf-8""" ) as merges_handle:
__lowerCAmelCase = merges_handle.read().split("""\n""" )[:-1]
__lowerCAmelCase = [tuple(merge.split()[:-1] ) for merge in merges]
__lowerCAmelCase = dict(zip(__SCREAMING_SNAKE_CASE,range(len(__SCREAMING_SNAKE_CASE ) ) ) )
__lowerCAmelCase = {}
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
__lowerCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE,token_ids_a=__SCREAMING_SNAKE_CASE,already_has_special_tokens=__SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
__lowerCAmelCase = [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return len(self.encoder )
def lowerCamelCase__ ( self ):
'''simple docstring'''
return dict(self.encoder,**self.added_tokens_encoder )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
__lowerCAmelCase = tuple(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
__lowerCAmelCase = get_pairs(__SCREAMING_SNAKE_CASE )
if not pairs:
return token
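        # Greedy BPE: repeatedly merge the adjacent symbol pair with the
        # lowest merge rank until no learned merge applies.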
while True:
__lowerCAmelCase = min(__SCREAMING_SNAKE_CASE,key=lambda __SCREAMING_SNAKE_CASE : self.bpe_ranks.get(__SCREAMING_SNAKE_CASE,float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
__lowerCAmelCase , __lowerCAmelCase = bigram
__lowerCAmelCase = []
__lowerCAmelCase = 0
while i < len(__SCREAMING_SNAKE_CASE ):
try:
__lowerCAmelCase = word.index(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__lowerCAmelCase = j
if word[i] == first and i < len(__SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowerCAmelCase = tuple(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = new_word
if len(__SCREAMING_SNAKE_CASE ) == 1:
break
else:
__lowerCAmelCase = get_pairs(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = """@@ """.join(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = word[:-4]
__lowerCAmelCase = word
return word
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = []
__lowerCAmelCase = re.findall(R"""\S+\n?""",__SCREAMING_SNAKE_CASE )
for token in words:
split_tokens.extend(list(self.bpe(__SCREAMING_SNAKE_CASE ).split(""" """ ) ) )
return split_tokens
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.encoder.get(__SCREAMING_SNAKE_CASE,self.encoder.get(self.unk_token ) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.decoder.get(__SCREAMING_SNAKE_CASE,self.unk_token )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = """ """.join(__SCREAMING_SNAKE_CASE ).replace("""@@ ""","""""" ).strip()
return out_string
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__lowerCAmelCase = os.path.join(
__SCREAMING_SNAKE_CASE,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__lowerCAmelCase = os.path.join(
__SCREAMING_SNAKE_CASE,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file,__SCREAMING_SNAKE_CASE )
if os.path.abspath(self.merges_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ):
copyfile(self.merges_file,__SCREAMING_SNAKE_CASE )
return out_vocab_file, out_merge_file
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
try:
with open(__SCREAMING_SNAKE_CASE,"""r""",encoding="""utf-8""" ) as fd:
self.add_from_file(__SCREAMING_SNAKE_CASE )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f'Incorrect encoding detected in {f}, please rebuild the dataset' )
return
__lowerCAmelCase = f.readlines()
for lineTmp in lines:
__lowerCAmelCase = lineTmp.strip()
__lowerCAmelCase = line.rfind(""" """ )
if idx == -1:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt>'""" )
__lowerCAmelCase = line[:idx]
__lowerCAmelCase = len(self.encoder )
| 689 |
'''simple docstring'''
def _lowerCAmelCase ( lowercase = 5000_0000 ) -> int:
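    # Project Euler 87: count the numbers below the limit expressible as
    # p**2 + q**3 + r**4 for primes p, q, r; a sieve bounds the candidate primes.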
__lowerCAmelCase = set()
__lowerCAmelCase = int((limit - 24) ** (1 / 2) )
__lowerCAmelCase = set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , lowercase ) ) )
for primea in primes:
__lowerCAmelCase = primea * primea
for primea in primes:
__lowerCAmelCase = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
__lowerCAmelCase = primea * primea * primea * primea
__lowerCAmelCase = square + cube + tetr
if total >= limit:
break
ret.add(lowercase )
return len(lowercase )
if __name__ == "__main__":
print(f'{solution() = }')
| 689 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a : Optional[int] = {
"""configuration_blip_2""": [
"""BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Blip2Config""",
"""Blip2QFormerConfig""",
"""Blip2VisionConfig""",
],
"""processing_blip_2""": ["""Blip2Processor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[int] = [
"""BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Blip2Model""",
"""Blip2QFormerModel""",
"""Blip2PreTrainedModel""",
"""Blip2ForConditionalGeneration""",
"""Blip2VisionModel""",
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
_a : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 689 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _UpperCAmelCase ( lowerCAmelCase_ , unittest.TestCase ):
a : Optional[int] =TextToVideoSDPipeline
a : Optional[int] =TEXT_TO_IMAGE_PARAMS
a : Any =TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
a : Union[str, Any] =frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64),layers_per_block=2,sample_size=32,in_channels=4,out_channels=4,down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D"""),up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D"""),cross_attention_dim=32,attention_head_dim=4,)
__lowerCAmelCase = DDIMScheduler(
beta_start=0.0_0085,beta_end=0.012,beta_schedule="""scaled_linear""",clip_sample=__SCREAMING_SNAKE_CASE,set_alpha_to_one=__SCREAMING_SNAKE_CASE,)
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],latent_channels=4,sample_size=1_28,)
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1e-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=10_00,hidden_act="""gelu""",projection_dim=5_12,)
__lowerCAmelCase = CLIPTextModel(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=0 ):
'''simple docstring'''
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
__lowerCAmelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = TextToVideoSDPipeline(**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sd_pipe.to(__SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = """np"""
__lowerCAmelCase = sd_pipe(**__SCREAMING_SNAKE_CASE ).frames
__lowerCAmelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
__lowerCAmelCase = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase__ ( self ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE,expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available(),reason="""XFormers attention is only available with CUDA and `xformers` installed""",)
def lowerCamelCase__ ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE,expected_max_diff=1e-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
__lowerCAmelCase = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
__lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
__lowerCAmelCase = pipe.to("""cuda""" )
__lowerCAmelCase = """Spiderman is surfing"""
__lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCAmelCase = pipe(__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=25,output_type="""pt""" ).frames
__lowerCAmelCase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
__lowerCAmelCase = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
__lowerCAmelCase = pipe.to("""cuda""" )
__lowerCAmelCase = """Spiderman is surfing"""
__lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCAmelCase = pipe(__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=2,output_type="""pt""" ).frames
__lowerCAmelCase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 689 | 1 |
'''simple docstring'''
from __future__ import annotations
def _lowerCAmelCase ( lowercase ) -> int:
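    # Dynamic programming in one pass: track the best sum that includes the
    # current number and the best sum that excludes it (no two adjacent picks).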
if not nums:
return 0
__lowerCAmelCase = nums[0]
__lowerCAmelCase = 0
for num in nums[1:]:
__lowerCAmelCase , __lowerCAmelCase = (
max_excluding + num,
max(lowercase , lowercase ),
)
return max(lowercase , lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 689 |
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def _lowerCAmelCase ( lowercase ) -> Optional[int]:
if not is_accelerate_available():
return method
__lowerCAmelCase = version.parse(accelerate.__version__ ).base_version
if version.parse(lowercase ) < version.parse("""0.17.0""" ):
return method
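    # For accelerate >= 0.17.0, trigger any registered offload hook's
    # pre_forward before the wrapped method so weights are moved on-device.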
def wrapper(self , *lowercase , **lowercase ):
if hasattr(self , """_hf_hook""" ) and hasattr(self._hf_hook , """pre_forward""" ):
self._hf_hook.pre_forward(self )
return method(self , *lowercase , **lowercase )
return wrapper
| 689 | 1 |
'''simple docstring'''
from math import ceil
def _lowerCAmelCase ( lowercase = 1001 ) -> int:
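    # Ring i of the number spiral has side length 2i + 1; its four corners sum
    # to 4 * (2i + 1)**2 - 12i, written below as 4 * odd**2 - 6 * even.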
__lowerCAmelCase = 1
for i in range(1 , int(ceil(n / 2.0 ) ) ):
__lowerCAmelCase = 2 * i + 1
__lowerCAmelCase = 2 * i
__lowerCAmelCase = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
_a : List[Any] = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
| 689 |
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
# load base model
__lowerCAmelCase = StableDiffusionPipeline.from_pretrained(lowercase , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
__lowerCAmelCase = load_file(lowercase )
__lowerCAmelCase = []
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key, it usually will be something like below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# as we have set the alpha beforehand, so just skip
if ".alpha" in key or key in visited:
continue
if "text" in key:
__lowerCAmelCase = key.split(""".""" )[0].split(LORA_PREFIX_TEXT_ENCODER + """_""" )[-1].split("""_""" )
__lowerCAmelCase = pipeline.text_encoder
else:
__lowerCAmelCase = key.split(""".""" )[0].split(LORA_PREFIX_UNET + """_""" )[-1].split("""_""" )
__lowerCAmelCase = pipeline.unet
# find the target layer
__lowerCAmelCase = layer_infos.pop(0 )
while len(lowercase ) > -1:
try:
__lowerCAmelCase = curr_layer.__getattr__(lowercase )
if len(lowercase ) > 0:
__lowerCAmelCase = layer_infos.pop(0 )
elif len(lowercase ) == 0:
break
except Exception:
if len(lowercase ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
__lowerCAmelCase = layer_infos.pop(0 )
__lowerCAmelCase = []
if "lora_down" in key:
pair_keys.append(key.replace("""lora_down""" , """lora_up""" ) )
pair_keys.append(lowercase )
else:
pair_keys.append(lowercase )
pair_keys.append(key.replace("""lora_up""" , """lora_down""" ) )
# update weight
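    # LoRA applies W <- W + alpha * (up @ down); 1x1 conv weights are squeezed
    # to 2-D for the matrix product and unsqueezed back afterwards.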
if len(state_dict[pair_keys[0]].shape ) == 4:
__lowerCAmelCase = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
__lowerCAmelCase = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(lowercase , lowercase ).unsqueeze(2 ).unsqueeze(3 )
else:
__lowerCAmelCase = state_dict[pair_keys[0]].to(torch.floataa )
__lowerCAmelCase = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(lowercase , lowercase )
# update visited list
for item in pair_keys:
visited.append(lowercase )
return pipeline
if __name__ == "__main__":
_a : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
_a : Optional[int] = parser.parse_args()
_a : Dict = args.base_model_path
_a : Optional[Any] = args.checkpoint_path
_a : Union[str, Any] = args.dump_path
_a : Optional[int] = args.lora_prefix_unet
_a : int = args.lora_prefix_text_encoder
_a : str = args.alpha
_a : Any = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
_a : Tuple = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 689 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( lowercase = 6008_5147_5143 ) -> int:
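    # Trial division: divide out each factor i completely and keep the last
    # factor found, which is the largest prime factor (Project Euler 3).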
try:
__lowerCAmelCase = int(lowercase )
except (TypeError, ValueError):
raise TypeError("""Parameter n must be int or castable to int.""" )
if n <= 0:
raise ValueError("""Parameter n must be greater than or equal to one.""" )
__lowerCAmelCase = 2
__lowerCAmelCase = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
__lowerCAmelCase = i
while n % i == 0:
__lowerCAmelCase = n // i
i += 1
return int(lowercase )
if __name__ == "__main__":
print(f'{solution() = }')
| 689 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def _lowerCAmelCase ( lowercase = "" , ) -> bool:
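    # A string can be rearranged into a palindrome iff at most one character
    # occurs an odd number of times (spaces ignored, case-insensitive).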
return sum(c % 2 for c in Counter(input_str.replace(""" """ , """""" ).lower() ).values() ) < 2
def _lowerCAmelCase ( lowercase = "" ) -> bool:
if len(lowercase ) == 0:
return True
__lowerCAmelCase = input_str.replace(""" """ , """""" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
__lowerCAmelCase = {}
for character in lower_case_input_str:
__lowerCAmelCase = character_freq_dict.get(lowercase , 0 ) + 1
__lowerCAmelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def _lowerCAmelCase ( lowercase = "" ) -> None:
print("""\nFor string = """ , lowercase , """:""" )
print(
"""> can_string_be_rearranged_as_palindrome_counter()""" , """\tans =""" , can_string_be_rearranged_as_palindrome_counter(lowercase ) , """\ttime =""" , timeit(
"""z.can_string_be_rearranged_as_palindrome_counter(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
print(
"""> can_string_be_rearranged_as_palindrome()""" , """\tans =""" , can_string_be_rearranged_as_palindrome(lowercase ) , """\ttime =""" , timeit(
"""z.can_string_be_rearranged_as_palindrome(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
if __name__ == "__main__":
_a : int = input(
"""Enter string to determine if it can be rearranged as a palindrome or not: """
).strip()
benchmark(check_str)
_a : Optional[int] = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
| 689 | 1 |
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def _lowerCAmelCase ( lowercase ) -> List[Any]:
__lowerCAmelCase = VideoMAEConfig()
set_architecture_configs(lowercase , lowercase )
if "finetuned" not in model_name:
__lowerCAmelCase = False
if "finetuned" in model_name:
__lowerCAmelCase = """huggingface/label-files"""
if "kinetics" in model_name:
__lowerCAmelCase = 400
__lowerCAmelCase = """kinetics400-id2label.json"""
elif "ssv2" in model_name:
__lowerCAmelCase = 174
__lowerCAmelCase = """something-something-v2-id2label.json"""
else:
raise ValueError("""Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.""" )
__lowerCAmelCase = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="""dataset""" ) , """r""" ) )
__lowerCAmelCase = {int(lowercase ): v for k, v in idalabel.items()}
__lowerCAmelCase = idalabel
__lowerCAmelCase = {v: k for k, v in idalabel.items()}
return config
def _lowerCAmelCase ( lowercase , lowercase ) -> Any:
if "small" in model_name:
__lowerCAmelCase = 384
__lowerCAmelCase = 1536
__lowerCAmelCase = 12
__lowerCAmelCase = 16
__lowerCAmelCase = 12
__lowerCAmelCase = 3
__lowerCAmelCase = 192
__lowerCAmelCase = 768
elif "large" in model_name:
__lowerCAmelCase = 1024
__lowerCAmelCase = 4096
__lowerCAmelCase = 24
__lowerCAmelCase = 16
__lowerCAmelCase = 12
__lowerCAmelCase = 8
__lowerCAmelCase = 512
__lowerCAmelCase = 2048
elif "huge" in model_name:
__lowerCAmelCase = 1280
__lowerCAmelCase = 5120
__lowerCAmelCase = 32
__lowerCAmelCase = 16
__lowerCAmelCase = 12
__lowerCAmelCase = 8
__lowerCAmelCase = 640
__lowerCAmelCase = 2560
elif "base" not in model_name:
raise ValueError("""Model name should include either \"small\", \"base\", \"large\", or \"huge\"""" )
def _lowerCAmelCase ( lowercase ) -> List[str]:
if "encoder." in name:
__lowerCAmelCase = name.replace("""encoder.""" , """""" )
if "cls_token" in name:
__lowerCAmelCase = name.replace("""cls_token""" , """videomae.embeddings.cls_token""" )
if "decoder_pos_embed" in name:
__lowerCAmelCase = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
__lowerCAmelCase = name.replace("""pos_embed""" , """videomae.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
__lowerCAmelCase = name.replace("""patch_embed.proj""" , """videomae.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
__lowerCAmelCase = name.replace("""patch_embed.norm""" , """videomae.embeddings.norm""" )
if "decoder.blocks" in name:
__lowerCAmelCase = name.replace("""decoder.blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
__lowerCAmelCase = name.replace("""blocks""" , """videomae.encoder.layer""" )
if "attn.proj" in name:
__lowerCAmelCase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name and "bias" not in name:
__lowerCAmelCase = name.replace("""attn""" , """attention.self""" )
if "attn" in name:
__lowerCAmelCase = name.replace("""attn""" , """attention.attention""" )
if "norm1" in name:
__lowerCAmelCase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__lowerCAmelCase = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__lowerCAmelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__lowerCAmelCase = name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
__lowerCAmelCase = name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
__lowerCAmelCase = name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
__lowerCAmelCase = name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
__lowerCAmelCase = name.replace("""norm.weight""" , """videomae.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
__lowerCAmelCase = name.replace("""norm.bias""" , """videomae.layernorm.bias""" )
if "head" in name and "decoder" not in name:
__lowerCAmelCase = name.replace("""head""" , """classifier""" )
return name
def _lowerCAmelCase ( lowercase , lowercase ) -> List[Any]:
for key in orig_state_dict.copy().keys():
__lowerCAmelCase = orig_state_dict.pop(lowercase )
if key.startswith("""encoder.""" ):
__lowerCAmelCase = key.replace("""encoder.""" , """""" )
if "qkv" in key:
__lowerCAmelCase = key.split(""".""" )
if key.startswith("""decoder.blocks""" ):
__lowerCAmelCase = config.decoder_hidden_size
__lowerCAmelCase = int(key_split[2] )
__lowerCAmelCase = """decoder.decoder_layers."""
if "weight" in key:
__lowerCAmelCase = val[:dim, :]
__lowerCAmelCase = val[dim : dim * 2, :]
__lowerCAmelCase = val[-dim:, :]
else:
__lowerCAmelCase = config.hidden_size
__lowerCAmelCase = int(key_split[1] )
__lowerCAmelCase = """videomae.encoder.layer."""
if "weight" in key:
__lowerCAmelCase = val[:dim, :]
__lowerCAmelCase = val[dim : dim * 2, :]
__lowerCAmelCase = val[-dim:, :]
else:
__lowerCAmelCase = val
return orig_state_dict
def _lowerCAmelCase ( ) -> str:
__lowerCAmelCase = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
__lowerCAmelCase = np.load(lowercase )
return list(lowercase )
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]:
__lowerCAmelCase = get_videomae_config(lowercase )
if "finetuned" in model_name:
__lowerCAmelCase = VideoMAEForVideoClassification(lowercase )
else:
__lowerCAmelCase = VideoMAEForPreTraining(lowercase )
# download original checkpoint, hosted on Google Drive
__lowerCAmelCase = """pytorch_model.bin"""
gdown.cached_download(lowercase , lowercase , quiet=lowercase )
__lowerCAmelCase = torch.load(lowercase , map_location="""cpu""" )
if "model" in files:
__lowerCAmelCase = files["""model"""]
else:
__lowerCAmelCase = files["""module"""]
__lowerCAmelCase = convert_state_dict(lowercase , lowercase )
model.load_state_dict(lowercase )
model.eval()
# verify model on basic input
__lowerCAmelCase = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
__lowerCAmelCase = prepare_video()
__lowerCAmelCase = image_processor(lowercase , return_tensors="""pt""" )
if "finetuned" not in model_name:
__lowerCAmelCase = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
__lowerCAmelCase = torch.load(lowercase )
__lowerCAmelCase = model(**lowercase )
__lowerCAmelCase = outputs.logits
__lowerCAmelCase = [
"""videomae-small-finetuned-kinetics""",
"""videomae-small-finetuned-ssv2""",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"""videomae-base-short""",
"""videomae-base-short-finetuned-kinetics""",
"""videomae-base""",
"""videomae-base-finetuned-kinetics""",
"""videomae-large""",
"""videomae-large-finetuned-kinetics""",
"""videomae-huge-finetuned-kinetics""",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"""videomae-base-short-ssv2""",
"""videomae-base-short-finetuned-ssv2""",
"""videomae-base-ssv2""",
"""videomae-base-finetuned-ssv2""",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 400] )
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key.startswith("encoder."):
            key = key.replace("encoder.", "")
        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
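# The slicing in convert_state_dict unpacks a fused qkv projection: the
# original checkpoint stores a single (3 * dim, dim) matrix, while the HF
# attention modules expect separate (dim, dim) query/key/value matrices.
# A minimal illustrative sketch of that split on a toy tensor follows;
# `toy_qkv_split` is not part of the original script.
def toy_qkv_split(qkv_weight, dim):
    query = qkv_weight[:dim, :]
    key = qkv_weight[dim : dim * 2, :]
    value = qkv_weight[-dim:, :]
    return query, key, value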
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
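# Minimal sketch (not part of the original script) of exercising the
# converted checkpoint afterwards. The random input makes the predicted
# class meaningless; this only checks that the export loads and runs.
def demo_converted_model(checkpoint_dir):
    model = VideoMAEForVideoClassification.from_pretrained(checkpoint_dir)
    pixel_values = torch.randn(1, 16, 3, 224, 224)  # (batch, frames, channels, height, width)
    with torch.no_grad():
        logits = model(pixel_values=pixel_values).logits
    return logits.argmax(-1)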
| 689 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self, vocab_size=10000, encoder_layers=12, encoder_ffn_dim=2048, encoder_attention_heads=4,
        decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4, encoder_layerdrop=0.0,
        decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="relu",
        d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02,
        decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2,
        max_source_positions=6000, max_target_positions=1024, num_conv_layers=2, conv_kernel_sizes=(5, 5),
        conv_channels=1024, input_feat_per_channel=80, input_channels=1, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
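# Minimal sketch (illustrative, not part of the original module): the
# kernel-size check in __init__ above fires when conv_kernel_sizes and
# num_conv_layers disagree.
def _demo_config_validation():
    config = Speech2TextConfig()  # defaults: num_conv_layers=2, conv_kernel_sizes=(5, 5)
    assert len(config.conv_kernel_sizes) == config.num_conv_layers
    try:
        Speech2TextConfig(num_conv_layers=3, conv_kernel_sizes=(5, 5))
    except ValueError as err:
        return str(err)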
| 689 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_a : Tuple = """\
"""
_a : Tuple = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
_a : Optional[Any] = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
"""input_texts""": datasets.Value("""string""" ),
} ),reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""],)
    def _compute(self, input_texts, model_id, batch_size=16, add_start_token=True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 689 | 1 |
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("""At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training""")
# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """Factory function used to instantiate the training command from provided command line arguments."""
    return TrainCommand(args)
class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command's arguments on the root transformers-cli parser."""
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)
    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")
        self.framework = "tf" if is_tf_available() else "torch"
        os.makedirs(args.output, exist_ok=True)
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
self.pipeline.fit(
self.train_dataset,validation_data=self.valid_dataset,validation_split=self.validation_split,learning_rate=self.learning_rate,adam_epsilon=self.adam_epsilon,train_batch_size=self.train_batch_size,valid_batch_size=self.valid_batch_size,)
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
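# Minimal sketch (hypothetical values, not part of the original module):
# building the argparse Namespace that TrainCommand.__init__ expects, as
# the `transformers-cli train ...` argument parser above would.
def _demo_train_command_args():
    return Namespace(
        train_data="train.csv", column_label=0, column_text=1, column_id=2,
        skip_first_row=False, validation_data="", validation_split=0.1,
        output="./out", task="text_classification", model="bert-base-uncased",
        train_batch_size=32, valid_batch_size=64, learning_rate=3e-5, adam_epsilon=1e-08,
    )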
| 689 |
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]
    def __call__(self, images=None, input_points=None, input_labels=None, input_boxes=None, return_tensors=None, **kwargs) -> BatchEncoding:
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]
        if hasattr(original_sizes, "numpy"):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()
        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points, input_labels=input_labels, input_boxes=input_boxes
        )
        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor,
            original_sizes,
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            return_tensors=return_tensors,
        )
        return encoding_image_processor
    def _normalize_and_convert(self, encoding_image_processor, original_sizes, input_points=None, input_labels=None, input_boxes=None, return_tensors="pt"):
if input_points is not None:
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,original_sizes[0] ) for point in input_points
]
else:
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
for point, original_size in zip(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
__lowerCAmelCase , __lowerCAmelCase = self._pad_points_and_labels(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )
if input_labels is not None:
__lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )
if input_boxes is not None:
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,original_sizes[0],is_bounding_box=__SCREAMING_SNAKE_CASE )
for box in input_boxes
]
else:
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,is_bounding_box=__SCREAMING_SNAKE_CASE )
for box, original_size in zip(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
]
__lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )
if input_boxes is not None:
if return_tensors == "pt":
__lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE )
# boxes batch size of 1 by default
__lowerCAmelCase = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
__lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
# boxes batch size of 1 by default
__lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"""input_boxes""": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
__lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
__lowerCAmelCase = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
__lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
__lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"""input_points""": input_points} )
if input_labels is not None:
if return_tensors == "pt":
__lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
__lowerCAmelCase = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
__lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
__lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"""input_labels""": input_labels} )
return encoding_image_processor
    def _pad_points_and_labels(self, input_points, input_labels):
__lowerCAmelCase = max([point.shape[0] for point in input_points] )
__lowerCAmelCase = []
for i, point in enumerate(__SCREAMING_SNAKE_CASE ):
if point.shape[0] != expected_nb_points:
__lowerCAmelCase = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value],axis=0 )
__lowerCAmelCase = np.append(input_labels[i],[self.point_pad_value] )
processed_input_points.append(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = processed_input_points
return input_points, input_labels
    def _normalize_coordinates(self, target_size, coords, original_size, is_bounding_box=False):
__lowerCAmelCase , __lowerCAmelCase = original_size
__lowerCAmelCase , __lowerCAmelCase = self.image_processor._get_preprocess_shape(__SCREAMING_SNAKE_CASE,longest_edge=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = deepcopy(__SCREAMING_SNAKE_CASE ).astype(__SCREAMING_SNAKE_CASE )
if is_bounding_box:
__lowerCAmelCase = coords.reshape(-1,2,2 )
__lowerCAmelCase = coords[..., 0] * (new_w / old_w)
__lowerCAmelCase = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
__lowerCAmelCase = coords.reshape(-1,4 )
return coords
    def _check_and_preprocess_points(self, input_points=None, input_labels=None, input_boxes=None):
if input_points is not None:
if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ): # Checks for TF or Torch tensor
__lowerCAmelCase = input_points.numpy().tolist()
if not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) or not isinstance(input_points[0],__SCREAMING_SNAKE_CASE ):
raise ValueError("""Input points must be a list of list of floating points.""" )
__lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ) for input_point in input_points]
else:
__lowerCAmelCase = None
if input_labels is not None:
if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ):
__lowerCAmelCase = input_labels.numpy().tolist()
if not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) or not isinstance(input_labels[0],__SCREAMING_SNAKE_CASE ):
raise ValueError("""Input labels must be a list of list integers.""" )
__lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ) for label in input_labels]
else:
__lowerCAmelCase = None
if input_boxes is not None:
if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ):
__lowerCAmelCase = input_boxes.numpy().tolist()
if (
not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
or not isinstance(input_boxes[0],__SCREAMING_SNAKE_CASE )
or not isinstance(input_boxes[0][0],__SCREAMING_SNAKE_CASE )
):
raise ValueError("""Input boxes must be a list of list of list of floating points.""" )
                input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
else:
__lowerCAmelCase = None
return input_points, input_labels, input_boxes
@property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))
    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
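# Minimal sketch (not part of the processor) of the rescaling performed by
# _normalize_coordinates above: prompt coordinates given in the original
# image frame are mapped into the resized frame whose longest side is
# `longest_edge`. The rounding here only approximates the image processor's
# _get_preprocess_shape.
def _toy_rescale_point(point, original_size, longest_edge=1024):
    old_h, old_w = original_size
    scale = longest_edge / max(old_h, old_w)
    new_h, new_w = int(old_h * scale + 0.5), int(old_w * scale + 0.5)
    x, y = point
    return (x * (new_w / old_w), y * (new_h / old_h))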
| 689 | 1 |
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}


class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
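# Minimal usage sketch (not part of the original module): wiring collate_fn
# into a DataLoader; dataset construction details are elided.
def build_loader(dataset, batch_size=8):
    from torch.utils.data import DataLoader

    return DataLoader(dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)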
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
    return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017], std=[0.12221994, 0.12145835, 0.14380469]),
] )
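# Minimal usage sketch (not part of the original module): the pipeline above
# maps any PIL image to a normalized (3, 224, 224) float tensor.
def _demo_transform():
    transform = get_image_transforms()
    image = Image.new("RGB", (640, 480))  # placeholder image
    return transform(image).shape  # torch.Size([3, 224, 224])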
| 689 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def _lowerCAmelCase ( ) -> Optional[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
__lowerCAmelCase = training_args.get_process_log_level()
logger.setLevel(lowercase )
datasets.utils.logging.set_verbosity(lowercase )
transformers.utils.logging.set_verbosity(lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
__lowerCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowerCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__lowerCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
__lowerCAmelCase = {"""train""": data_args.train_file, """validation""": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
__lowerCAmelCase = data_args.train_file.split(""".""" )[-1]
__lowerCAmelCase = data_args.test_file.split(""".""" )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
__lowerCAmelCase = data_args.test_file
else:
raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" )
for key in data_files.keys():
logger.info(f'load a local file for {key}: {data_files[key]}' )
if data_args.train_file.endswith(""".csv""" ):
# Loading a dataset from local csv files
__lowerCAmelCase = load_dataset("""csv""" , data_files=lowercase , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
__lowerCAmelCase = load_dataset("""json""" , data_files=lowercase , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
__lowerCAmelCase = raw_datasets["""train"""].features["""label"""].names
__lowerCAmelCase = len(lowercase )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowerCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
__lowerCAmelCase = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowercase , )
__lowerCAmelCase = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
__lowerCAmelCase = """max_length"""
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
__lowerCAmelCase = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
__lowerCAmelCase = {"""Refused""": 0, """Entailed""": 1}
__lowerCAmelCase = {0: """Refused""", 1: """Entailed"""}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
__lowerCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(lowercase ):
# Tokenize the texts
def _convert_table_text_to_pandas(lowercase ):
__lowerCAmelCase = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )]
__lowerCAmelCase = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
__lowerCAmelCase = examples["""statement"""]
__lowerCAmelCase = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) )
__lowerCAmelCase = tokenizer(lowercase , lowercase , padding=lowercase , max_length=lowercase , truncation=lowercase )
__lowerCAmelCase = examples["""label"""]
return result
with training_args.main_process_first(desc="""dataset map pre-processing""" ):
__lowerCAmelCase = raw_datasets.map(
lowercase , batched=lowercase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
__lowerCAmelCase = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
__lowerCAmelCase = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
__lowerCAmelCase = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
__lowerCAmelCase = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("""--do_predict requires a test dataset""" )
__lowerCAmelCase = raw_datasets["""test"""]
if data_args.max_predict_samples is not None:
__lowerCAmelCase = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowercase ) ) , 3 ):
logger.info(f'Sample {index} of the training set: {train_dataset[index]}.' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowercase ):
__lowerCAmelCase = p.predictions[0] if isinstance(p.predictions , lowercase ) else p.predictions
__lowerCAmelCase = np.argmax(lowercase , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
__lowerCAmelCase = default_data_collator
    elif training_args.fp16:
__lowerCAmelCase = DataCollatorWithPadding(lowercase , pad_to_multiple_of=8 )
else:
__lowerCAmelCase = None
# Initialize our Trainer
__lowerCAmelCase = Trainer(
model=lowercase , args=lowercase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowercase , tokenizer=lowercase , data_collator=lowercase , )
# Training
if training_args.do_train:
__lowerCAmelCase = None
if training_args.resume_from_checkpoint is not None:
__lowerCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__lowerCAmelCase = last_checkpoint
__lowerCAmelCase = trainer.train(resume_from_checkpoint=lowercase )
__lowerCAmelCase = train_result.metrics
__lowerCAmelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase )
)
__lowerCAmelCase = min(lowercase , len(lowercase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("""train""" , lowercase )
trainer.save_metrics("""train""" , lowercase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__lowerCAmelCase = trainer.evaluate(eval_dataset=lowercase )
__lowerCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowercase )
__lowerCAmelCase = min(lowercase , len(lowercase ) )
trainer.log_metrics("""eval""" , lowercase )
trainer.save_metrics("""eval""" , lowercase )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
__lowerCAmelCase = predict_dataset.remove_columns("""label""" )
__lowerCAmelCase = trainer.predict(lowercase , metric_key_prefix="""predict""" ).predictions
__lowerCAmelCase = np.argmax(lowercase , axis=1 )
__lowerCAmelCase = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" )
if trainer.is_world_process_zero():
with open(lowercase , """w""" ) as writer:
logger.info("""***** Predict Results *****""" )
writer.write("""index\tprediction\n""" )
for index, item in enumerate(lowercase ):
__lowerCAmelCase = label_list[item]
writer.write(f'{index}\t{item}\n' )
__lowerCAmelCase = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase )
else:
trainer.create_model_card(**lowercase )
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
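# Minimal sketch (illustrative, not part of the training script) of the
# TabFact table format parsed by _convert_table_text_to_pandas above: rows
# are newline-separated, cells are '#'-separated, and the first row holds
# the column names.
def _demo_table_flattening():
    table_text = "city#population\nparis#2.1m\nberlin#3.6m\n"
    rows = [line.split("#") for line in table_text.strip("\n").split("\n")]
    return pd.DataFrame.from_records(rows[1:], columns=rows[0])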
| 689 | 1 |
'''simple docstring'''
def nand_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
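# NAND is functionally complete; a minimal sketch (not part of the original
# module) building the other basic gates from nand_gate alone.
def not_gate(input_1: int) -> int:
    return nand_gate(input_1, input_1)


def and_gate(input_1: int, input_2: int) -> int:
    return not_gate(nand_gate(input_1, input_2))


def or_gate(input_1: int, input_2: int) -> int:
    return nand_gate(not_gate(input_1), not_gate(input_2))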
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 689 |
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
_a : Union[str, Any] = os.path.join(git_repo_path, """src""", """diffusers""")
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = find_backend(""" if not is_torch_available():""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch""" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
__lowerCAmelCase = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch_and_transformers""" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
__lowerCAmelCase = find_backend(
""" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch_and_transformers_and_onnx""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("""torch""",__SCREAMING_SNAKE_CASE )
self.assertIn("""torch_and_transformers""",__SCREAMING_SNAKE_CASE )
self.assertIn("""flax_and_transformers""",__SCREAMING_SNAKE_CASE )
self.assertIn("""torch_and_transformers_and_onnx""",__SCREAMING_SNAKE_CASE )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""UNet2DModel""",objects["""torch"""] )
self.assertIn("""FlaxUNet2DConditionModel""",objects["""flax"""] )
self.assertIn("""StableDiffusionPipeline""",objects["""torch_and_transformers"""] )
self.assertIn("""FlaxStableDiffusionPipeline""",objects["""flax_and_transformers"""] )
self.assertIn("""LMSDiscreteScheduler""",objects["""torch_and_scipy"""] )
self.assertIn("""OnnxStableDiffusionPipeline""",objects["""torch_and_transformers_and_onnx"""] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = create_dummy_object("""CONSTANT""","""'torch'""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""\nCONSTANT = None\n""" )
__lowerCAmelCase = create_dummy_object("""function""","""'torch'""" )
self.assertEqual(
__SCREAMING_SNAKE_CASE,"""\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" )
__lowerCAmelCase = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, 'torch')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, 'torch')
"""
__lowerCAmelCase = create_dummy_object("""FakeClass""","""'torch'""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
"""
__lowerCAmelCase = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
self.assertEqual(dummy_files["""torch"""],__SCREAMING_SNAKE_CASE )
| 689 | 1 |
'''simple docstring'''
import random
class Onepad:
    @staticmethod
    def encrypt(text):
        """Encrypt `text` into a list of cipher values plus the matching random key."""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            cipher.append((i + k) * k)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher, key):
        """Invert encrypt(): each code point is recovered as (c - k**2) / k."""
        plain = []
        for i in range(len(cipher)):
            plain.append(chr(int((cipher[i] - key[i] ** 2) / key[i])))
        return "".join(plain)
if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
print(c, k)
print(Onepad().decrypt(c, k))
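    # Round-trip sanity check (a sketch; encrypt returns a (cipher, key) pair):
    assert Onepad().decrypt(*Onepad().encrypt("Hello")) == "Hello"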
| 689 |
'''simple docstring'''
def decimal_to_fraction(decimal) -> tuple[int, int]:
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
print(f'{decimal_to_fraction(2) = }')
print(f'{decimal_to_fraction(89.0) = }')
print(f'{decimal_to_fraction("67") = }')
print(f'{decimal_to_fraction("45.0") = }')
print(f'{decimal_to_fraction(1.5) = }')
print(f'{decimal_to_fraction("6.25") = }')
print(f'{decimal_to_fraction("78td") = }')
| 689 | 1 |
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
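# Example shell usage once transformers is installed (the console entry point
# maps to main() above):
#   transformers-cli env
#   transformers-cli download bert-base-uncased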
| 689 |
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"""
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
    # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
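# Typical downstream use of the generated classes (a sketch; any SentencePiece
# .model file works as input):
#   m = ModelProto()
#   m.ParseFromString(open("spiece.model", "rb").read())
#   print(m.trainer_spec.model_type, len(m.pieces))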
| 689 | 1 |
'''simple docstring'''
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
_a : Tuple = get_tests_dir("""fixtures/dummy-config.json""")
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = 0
def lowerCamelCase__ ( self ):
'''simple docstring'''
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = AutoConfig.from_pretrained("""bert-base-uncased""" )
self.assertIsInstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = AutoConfig.for_model("""roberta""" )
self.assertIsInstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
__lowerCAmelCase = os.path.join(__SCREAMING_SNAKE_CASE,"""fake-roberta""" )
os.makedirs(__SCREAMING_SNAKE_CASE,exist_ok=__SCREAMING_SNAKE_CASE )
with open(os.path.join(__SCREAMING_SNAKE_CASE,"""config.json""" ),"""w""" ) as f:
f.write(json.dumps({} ) )
__lowerCAmelCase = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertEqual(type(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
try:
AutoConfig.register("""custom""",__SCREAMING_SNAKE_CASE )
# Wrong model type will raise an error
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
AutoConfig.register("""model""",__SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
AutoConfig.register("""bert""",__SCREAMING_SNAKE_CASE )
# Now that the config is registered, it can be used as any other config with the auto-API
__lowerCAmelCase = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def lowerCamelCase__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE,"""bert-base is not a local folder and is not a valid model identifier""" ):
__lowerCAmelCase = AutoConfig.from_pretrained("""bert-base""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE,R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
__lowerCAmelCase = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE,revision="""aaaaaa""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE,"""hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""",):
__lowerCAmelCase = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""",trust_remote_code=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""",trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(config.__class__.__name__,"""NewModelConfig""" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE,trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(reloaded_config.__class__.__name__,"""NewModelConfig""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : Any ="""new-model"""
try:
AutoConfig.register("""new-model""",__SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local
__lowerCAmelCase = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
self.assertEqual(config.__class__.__name__,"""NewModelConfigLocal""" )
# If remote code is disabled, we load the local one.
__lowerCAmelCase = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""",trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(config.__class__.__name__,"""NewModelConfigLocal""" )
# If remote is enabled, we load from the Hub
__lowerCAmelCase = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""",trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(config.__class__.__name__,"""NewModelConfig""" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
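

# A minimal registration sketch mirroring the test above (assumes a config class
# whose `model_type` matches the registered name):
#   AutoConfig.register("new-model", NewModelConfigLocal)
#   cfg = AutoConfig.for_model("new-model")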
| 689 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : torch.FloatTensor
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=("DownEncoderBlock2D",),__SCREAMING_SNAKE_CASE=(64,),__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE="silu",__SCREAMING_SNAKE_CASE=True,):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = layers_per_block
__lowerCAmelCase = torch.nn.Convad(
__SCREAMING_SNAKE_CASE,block_out_channels[0],kernel_size=3,stride=1,padding=1,)
__lowerCAmelCase = None
__lowerCAmelCase = nn.ModuleList([] )
# down
__lowerCAmelCase = block_out_channels[0]
for i, down_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = output_channel
__lowerCAmelCase = block_out_channels[i]
__lowerCAmelCase = i == len(__SCREAMING_SNAKE_CASE ) - 1
__lowerCAmelCase = get_down_block(
__SCREAMING_SNAKE_CASE,num_layers=self.layers_per_block,in_channels=__SCREAMING_SNAKE_CASE,out_channels=__SCREAMING_SNAKE_CASE,add_downsample=not is_final_block,resnet_eps=1e-6,downsample_padding=0,resnet_act_fn=__SCREAMING_SNAKE_CASE,resnet_groups=__SCREAMING_SNAKE_CASE,attention_head_dim=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,)
self.down_blocks.append(__SCREAMING_SNAKE_CASE )
# mid
__lowerCAmelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1],resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,output_scale_factor=1,resnet_time_scale_shift="""default""",attention_head_dim=block_out_channels[-1],resnet_groups=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,)
# out
__lowerCAmelCase = nn.GroupNorm(num_channels=block_out_channels[-1],num_groups=__SCREAMING_SNAKE_CASE,eps=1e-6 )
__lowerCAmelCase = nn.SiLU()
__lowerCAmelCase = 2 * out_channels if double_z else out_channels
__lowerCAmelCase = nn.Convad(block_out_channels[-1],__SCREAMING_SNAKE_CASE,3,padding=1 )
__lowerCAmelCase = False
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = x
__lowerCAmelCase = self.conv_in(__SCREAMING_SNAKE_CASE )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
# down
if is_torch_version(""">=""","""1.11.0""" ):
for down_block in self.down_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
else:
for down_block in self.down_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE )
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE )
else:
# down
for down_block in self.down_blocks:
__lowerCAmelCase = down_block(__SCREAMING_SNAKE_CASE )
# middle
__lowerCAmelCase = self.mid_block(__SCREAMING_SNAKE_CASE )
# post-process
__lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_act(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=("UpDecoderBlock2D",),__SCREAMING_SNAKE_CASE=(64,),__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE="silu",__SCREAMING_SNAKE_CASE="group",):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = layers_per_block
__lowerCAmelCase = nn.Convad(
__SCREAMING_SNAKE_CASE,block_out_channels[-1],kernel_size=3,stride=1,padding=1,)
__lowerCAmelCase = None
__lowerCAmelCase = nn.ModuleList([] )
__lowerCAmelCase = in_channels if norm_type == """spatial""" else None
# mid
__lowerCAmelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1],resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,output_scale_factor=1,resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type,attention_head_dim=block_out_channels[-1],resnet_groups=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,)
# up
__lowerCAmelCase = list(reversed(__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = output_channel
__lowerCAmelCase = reversed_block_out_channels[i]
__lowerCAmelCase = i == len(__SCREAMING_SNAKE_CASE ) - 1
__lowerCAmelCase = get_up_block(
__SCREAMING_SNAKE_CASE,num_layers=self.layers_per_block + 1,in_channels=__SCREAMING_SNAKE_CASE,out_channels=__SCREAMING_SNAKE_CASE,prev_output_channel=__SCREAMING_SNAKE_CASE,add_upsample=not is_final_block,resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,resnet_groups=__SCREAMING_SNAKE_CASE,attention_head_dim=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,resnet_time_scale_shift=__SCREAMING_SNAKE_CASE,)
self.up_blocks.append(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = output_channel
# out
if norm_type == "spatial":
__lowerCAmelCase = SpatialNorm(block_out_channels[0],__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = nn.GroupNorm(num_channels=block_out_channels[0],num_groups=__SCREAMING_SNAKE_CASE,eps=1e-6 )
__lowerCAmelCase = nn.SiLU()
__lowerCAmelCase = nn.Convad(block_out_channels[0],__SCREAMING_SNAKE_CASE,3,padding=1 )
__lowerCAmelCase = False
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
__lowerCAmelCase = z
__lowerCAmelCase = self.conv_in(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
if is_torch_version(""">=""","""1.11.0""" ):
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
else:
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
else:
# middle
__lowerCAmelCase = self.mid_block(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
__lowerCAmelCase = up_block(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
# post-process
if latent_embeds is None:
__lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_act(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="random",__SCREAMING_SNAKE_CASE=False,__SCREAMING_SNAKE_CASE=True ):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = n_e
__lowerCAmelCase = vq_embed_dim
__lowerCAmelCase = beta
__lowerCAmelCase = legacy
__lowerCAmelCase = nn.Embedding(self.n_e,self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e,1.0 / self.n_e )
__lowerCAmelCase = remap
if self.remap is not None:
self.register_buffer("""used""",torch.tensor(np.load(self.remap ) ) )
__lowerCAmelCase = self.used.shape[0]
__lowerCAmelCase = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
__lowerCAmelCase = self.re_embed
__lowerCAmelCase = self.re_embed + 1
print(
f'Remapping {self.n_e} indices to {self.re_embed} indices. '
f'Using {self.unknown_index} for unknown indices.' )
else:
__lowerCAmelCase = n_e
__lowerCAmelCase = sane_index_shape
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
__lowerCAmelCase = inds.reshape(ishape[0],-1 )
__lowerCAmelCase = self.used.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = (inds[:, :, None] == used[None, None, ...]).long()
__lowerCAmelCase = match.argmax(-1 )
__lowerCAmelCase = match.sum(2 ) < 1
if self.unknown_index == "random":
__lowerCAmelCase = torch.randint(0,self.re_embed,size=new[unknown].shape ).to(device=new.device )
else:
__lowerCAmelCase = self.unknown_index
return new.reshape(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
__lowerCAmelCase = inds.reshape(ishape[0],-1 )
__lowerCAmelCase = self.used.to(__SCREAMING_SNAKE_CASE )
if self.re_embed > self.used.shape[0]: # extra token
__lowerCAmelCase = 0 # simply set to zero
__lowerCAmelCase = torch.gather(used[None, :][inds.shape[0] * [0], :],1,__SCREAMING_SNAKE_CASE )
return back.reshape(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = z.permute(0,2,3,1 ).contiguous()
__lowerCAmelCase = z.view(-1,self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
__lowerCAmelCase = torch.argmin(torch.cdist(__SCREAMING_SNAKE_CASE,self.embedding.weight ),dim=1 )
__lowerCAmelCase = self.embedding(__SCREAMING_SNAKE_CASE ).view(z.shape )
__lowerCAmelCase = None
__lowerCAmelCase = None
# compute loss for embedding
if not self.legacy:
__lowerCAmelCase = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
__lowerCAmelCase = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
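        # (straight-through estimator: the forward value is the quantized z_q,
        # while the backward pass copies gradients through to the encoder output z)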
__lowerCAmelCase = z + (z_q - z).detach()
# reshape back to match original input shape
__lowerCAmelCase = z_q.permute(0,3,1,2 ).contiguous()
if self.remap is not None:
__lowerCAmelCase = min_encoding_indices.reshape(z.shape[0],-1 ) # add batch axis
__lowerCAmelCase = self.remap_to_used(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = min_encoding_indices.reshape(-1,1 ) # flatten
if self.sane_index_shape:
__lowerCAmelCase = min_encoding_indices.reshape(z_q.shape[0],z_q.shape[2],z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if self.remap is not None:
__lowerCAmelCase = indices.reshape(shape[0],-1 ) # add batch axis
__lowerCAmelCase = self.unmap_to_all(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
__lowerCAmelCase = self.embedding(__SCREAMING_SNAKE_CASE )
if shape is not None:
__lowerCAmelCase = z_q.view(__SCREAMING_SNAKE_CASE )
# reshape back to match original input shape
__lowerCAmelCase = z_q.permute(0,3,1,2 ).contiguous()
return z_q
class _UpperCAmelCase ( lowerCAmelCase_ ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
__lowerCAmelCase = parameters
__lowerCAmelCase , __lowerCAmelCase = torch.chunk(__SCREAMING_SNAKE_CASE,2,dim=1 )
__lowerCAmelCase = torch.clamp(self.logvar,-30.0,20.0 )
__lowerCAmelCase = deterministic
__lowerCAmelCase = torch.exp(0.5 * self.logvar )
__lowerCAmelCase = torch.exp(self.logvar )
if self.deterministic:
__lowerCAmelCase = __lowerCAmelCase = torch.zeros_like(
self.mean,device=self.parameters.device,dtype=self.parameters.dtype )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
__lowerCAmelCase = randn_tensor(
self.mean.shape,generator=__SCREAMING_SNAKE_CASE,device=self.parameters.device,dtype=self.parameters.dtype )
__lowerCAmelCase = self.mean + self.std * sample
return x
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean,2 ) + self.var - 1.0 - self.logvar,dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean,2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar,dim=[1, 2, 3],)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=[1, 2, 3] ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
__lowerCAmelCase = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean,2 ) / self.var,dim=__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self.mean
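# For reference, kl() above with other=None is the closed form for a diagonal
# Gaussian against a standard normal:
#   KL(N(mu, sigma^2) || N(0, I)) = 0.5 * sum(mu^2 + sigma^2 - 1 - log sigma^2)
# with the sum taken over the non-batch dimensions [1, 2, 3].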
| 689 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class _UpperCAmelCase ( lowerCAmelCase_ ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE,"""hidden_sizes""" ) )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE,"""num_attention_heads""" ) )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE,"""num_encoder_blocks""" ) )
class _UpperCAmelCase :
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=13,__SCREAMING_SNAKE_CASE=64,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=4,__SCREAMING_SNAKE_CASE=[2, 2, 2, 2],__SCREAMING_SNAKE_CASE=[8, 4, 2, 1],__SCREAMING_SNAKE_CASE=[16, 32, 64, 1_28],__SCREAMING_SNAKE_CASE=[1, 4, 8, 16],__SCREAMING_SNAKE_CASE=[1, 2, 4, 8],__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE="gelu",__SCREAMING_SNAKE_CASE=0.1,__SCREAMING_SNAKE_CASE=0.1,__SCREAMING_SNAKE_CASE=0.02,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=None,):
'''simple docstring'''
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = num_encoder_blocks
__lowerCAmelCase = sr_ratios
__lowerCAmelCase = depths
__lowerCAmelCase = hidden_sizes
__lowerCAmelCase = downsampling_rates
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = initializer_range
__lowerCAmelCase = num_labels
__lowerCAmelCase = scope
def lowerCamelCase__ ( self ):
'''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size],self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
def lowerCamelCase__ ( self ):
'''simple docstring'''
return SegformerConfig(
image_size=self.image_size,num_channels=self.num_channels,num_encoder_blocks=self.num_encoder_blocks,depths=self.depths,hidden_sizes=self.hidden_sizes,num_attention_heads=self.num_attention_heads,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,initializer_range=self.initializer_range,)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = SegformerModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape,(self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = SegformerForSemanticSegmentation(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape,(self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape,(self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss,0.0 )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = 1
__lowerCAmelCase = SegformerForSemanticSegmentation(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase = torch.randint(0,1,(self.batch_size, self.image_size, self.image_size) ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,labels=__SCREAMING_SNAKE_CASE )
self.parent.assertGreater(result.loss,0.0 )
def lowerCamelCase__ ( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class _UpperCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
a : Optional[Any] =(
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
a : Tuple =(
{
"""feature-extraction""": SegformerModel,
"""image-classification""": SegformerForImageClassification,
"""image-segmentation""": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
a : Dict =True
a : Any =False
a : str =False
a : List[str] =False
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = SegformerModelTester(self )
__lowerCAmelCase = SegformerConfigTester(self,config_class=__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*__SCREAMING_SNAKE_CASE )
@unittest.skip("""SegFormer does not use inputs_embeds""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1],__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = True
for model_class in self.all_model_classes:
__lowerCAmelCase = True
__lowerCAmelCase = False
__lowerCAmelCase = True
__lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = outputs.attentions
__lowerCAmelCase = sum(self.model_tester.depths )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowerCAmelCase = True
__lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE )
# verify the first attentions (first block, first layer)
__lowerCAmelCase = (self.model_tester.image_size // 4) ** 2
__lowerCAmelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],)
# verify the last attentions (last block, last layer)
__lowerCAmelCase = (self.model_tester.image_size // 32) ** 2
__lowerCAmelCase = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ),[self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],)
__lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
__lowerCAmelCase = True
__lowerCAmelCase = True
__lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) )
self.assertEqual(out_len + 1,len(__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE )
# verify the first attentions (first block, first layer)
__lowerCAmelCase = (self.model_tester.image_size // 4) ** 2
__lowerCAmelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],)
def lowerCamelCase__ ( self ):
'''simple docstring'''
def check_hidden_states_output(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = outputs.hidden_states
__lowerCAmelCase = self.model_tester.num_encoder_blocks
self.assertEqual(len(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ),[
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
],)
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = True
for model_class in self.all_model_classes:
if model_class in get_values(__SCREAMING_SNAKE_CASE ):
continue
__lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
__lowerCAmelCase = self._prepare_for_class(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,return_labels=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
@slow
def lowerCamelCase__ ( self ):
'''simple docstring'''
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = SegformerModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = SegformerImageProcessor(
image_scale=(5_12, 5_12),keep_ratio=__SCREAMING_SNAKE_CASE,align=__SCREAMING_SNAKE_CASE,do_random_crop=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=__SCREAMING_SNAKE_CASE,return_tensors="""pt""" )
__lowerCAmelCase = encoded_inputs.pixel_values.to(__SCREAMING_SNAKE_CASE )
with torch.no_grad():
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
self.assertEqual(outputs.logits.shape,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3],__SCREAMING_SNAKE_CASE,atol=1e-4 ) )
@slow
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = SegformerImageProcessor(
image_scale=(5_12, 5_12),keep_ratio=__SCREAMING_SNAKE_CASE,align=__SCREAMING_SNAKE_CASE,do_random_crop=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = SegformerForSemanticSegmentation.from_pretrained(
"""nvidia/segformer-b1-finetuned-cityscapes-1024-1024""" ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=__SCREAMING_SNAKE_CASE,return_tensors="""pt""" )
__lowerCAmelCase = encoded_inputs.pixel_values.to(__SCREAMING_SNAKE_CASE )
with torch.no_grad():
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
self.assertEqual(outputs.logits.shape,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3],__SCREAMING_SNAKE_CASE,atol=1e-1 ) )
@slow
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = SegformerImageProcessor(
image_scale=(5_12, 5_12),keep_ratio=__SCREAMING_SNAKE_CASE,align=__SCREAMING_SNAKE_CASE,do_random_crop=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=__SCREAMING_SNAKE_CASE,return_tensors="""pt""" )
__lowerCAmelCase = encoded_inputs.pixel_values.to(__SCREAMING_SNAKE_CASE )
with torch.no_grad():
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = outputs.logits.detach().cpu()
__lowerCAmelCase = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE,target_sizes=[(5_00, 3_00)] )
__lowerCAmelCase = torch.Size((5_00, 3_00) )
self.assertEqual(segmentation[0].shape,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.Size((1_28, 1_28) )
self.assertEqual(segmentation[0].shape,__SCREAMING_SNAKE_CASE )
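

# The post-processing flow the tests exercise, as a stand-alone sketch (the
# checkpoint name is taken from the tests above; `img` is any PIL image):
#   processor = SegformerImageProcessor()
#   model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
#   outputs = model(**processor(images=img, return_tensors="pt"))
#   seg_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[img.size[::-1]])[0]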
| 689 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _UpperCAmelCase :
    a : str =field(
        default=None , metadata={"""help""": """Model type selected in the list: """ + """, """.join(MODEL_TYPES )} )
a : str =field(
default=lowerCAmelCase_ , metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""} )
a : int =field(
default=1_28 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a : int =field(
default=1_28 , metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""} , )
a : int =field(
default=64 , metadata={
"""help""": (
"""The maximum number of tokens for the question. Questions longer than this will """
"""be truncated to this length."""
)
} , )
a : int =field(
default=30 , metadata={
"""help""": (
"""The maximum length of an answer that can be generated. This is needed because the start """
"""and end predictions are not conditioned on one another."""
)
} , )
a : bool =field(
default=lowerCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
a : bool =field(
default=lowerCAmelCase_ , metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""} )
a : float =field(
default=0.0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
a : int =field(
        default=20 , metadata={"""help""": """The total number of n-best predictions to generate in the nbest_predictions.json output file."""} )
a : int =field(
default=0 , metadata={
"""help""": (
"""language id of input for language-specific xlm models (see"""
""" tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"""
)
} , )
a : int =field(default=1 , metadata={"""help""": """multiple threads for converting example to features"""} )
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : Optional[Any] ="""train"""
a : Optional[int] ="""dev"""
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : SquadDataTrainingArguments
a : List[SquadFeatures]
a : Split
a : bool
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = Split.train,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = "pt",):
'''simple docstring'''
__lowerCAmelCase = args
__lowerCAmelCase = is_language_sensitive
        __lowerCAmelCase = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
try:
__lowerCAmelCase = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
__lowerCAmelCase = mode
# Load data features from cache or dataset file
__lowerCAmelCase = """v2""" if args.version_2_with_negative else """v1"""
__lowerCAmelCase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowerCAmelCase = cached_features_file + """.lock"""
with FileLock(__SCREAMING_SNAKE_CASE ):
if os.path.exists(__SCREAMING_SNAKE_CASE ) and not args.overwrite_cache:
__lowerCAmelCase = time.time()
__lowerCAmelCase = torch.load(__SCREAMING_SNAKE_CASE )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
__lowerCAmelCase = self.old_features["""features"""]
__lowerCAmelCase = self.old_features.get("""dataset""",__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.old_features.get("""examples""",__SCREAMING_SNAKE_CASE )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
""" future run""" )
else:
if mode == Split.dev:
__lowerCAmelCase = self.processor.get_dev_examples(args.data_dir )
else:
__lowerCAmelCase = self.processor.get_train_examples(args.data_dir )
__lowerCAmelCase , __lowerCAmelCase = squad_convert_examples_to_features(
examples=self.examples,tokenizer=__SCREAMING_SNAKE_CASE,max_seq_length=args.max_seq_length,doc_stride=args.doc_stride,max_query_length=args.max_query_length,is_training=mode == Split.train,threads=args.threads,return_dataset=__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = time.time()
torch.save(
{"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples},__SCREAMING_SNAKE_CASE,)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.features[i]
__lowerCAmelCase = torch.tensor(feature.input_ids,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.attention_mask,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.token_type_ids,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.cls_index,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.p_mask,dtype=torch.float )
__lowerCAmelCase = torch.tensor(feature.is_impossible,dtype=torch.float )
__lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": attention_mask,
"""token_type_ids""": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"""is_impossible""": is_impossible} )
if self.is_language_sensitive:
inputs.update({"""langs""": (torch.ones(input_ids.shape,dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
__lowerCAmelCase = torch.tensor(feature.start_position,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.end_position,dtype=torch.long )
inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} )
return inputs
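

# A minimal usage sketch (the dataclass and dataset above are upstream's
# SquadDataTrainingArguments / SquadDataset; the names here are illustrative):
#   args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#   train_dataset = SquadDataset(args, tokenizer, mode=Split.train)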
| 689 | 1 |
'''simple docstring'''
import numpy as np
class _UpperCAmelCase :
def __init__( self ):
'''simple docstring'''
__lowerCAmelCase = (0, 0)
__lowerCAmelCase = None
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = 0
def __eq__( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.position == cell.position
def lowerCamelCase__ ( self ):
'''simple docstring'''
print(self.position )
class Gridworld:
    """
    The external world: a grid stored as a numpy array.

    world_size: shape of the grid, default (5, 5).
    """

    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neighbours(self, cell):
        """
        Return the in-bounds neighbours of `cell` (8-connected).
        """
        neighbour_coords = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_coords:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
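
# Example: in the default 5x5 world, the corner cell at (0, 0) has exactly
# three in-bounds neighbours, (0, 1), (1, 0) and (1, 1); interior cells have
# all eight.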
def astar(world, start, goal):
    """
    A* search from `start` to `goal` on `world`.

    Uses squared Euclidean distance to the goal as the heuristic and a
    uniform step cost of 1; returns the path as a list of positions.
    """
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neighbours(current):
            # Skip cells that have already been expanded.
            if any(c == n for c in _closed):
                continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            # Skip cells for which an equal-or-better entry is already open.
            if any(c == n and c.f <= n.f for c in _open):
                continue
            _open.append(n)
    # Reconstruct the path by walking parents back to the start.
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    path = astar(world, start, goal)
    # Mark the path on the grid, just for visual purposes.
    for position in path:
        world.w[position] = 1
    print(world.w)
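
# Expected behaviour (assuming the default empty 5x5 world and the fixes
# above): A* recovers the diagonal path
# [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)], so the printed grid shows 1.0
# along the main diagonal and 0.0 elsewhere.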
| 689 |
"""Convert GroupViT checkpoints from the original NVlabs repository
(https://github.com/NVlabs/GroupViT) to the Hugging Face format."""

import argparse

import requests
import torch
from PIL import Image

from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel


def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name
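
# Example of the renaming above (illustrative key):
#   "img_encoder.patch_embed.proj.weight"
#       -> "vision_model.embeddings.patch_embeddings.projection.weight"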
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require
            # special treatment: we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            # The target key names below are reconstructed following the renaming
            # scheme of `rename_key` (img_encoder.layers -> vision_model.encoder.stages,
            # blocks -> layers, attn -> self_attn).
            if "weight" in key:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"
                ] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require
            # special treatment: we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
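
# Shape sketch for the splits above: the fused attention projection weight has
# shape (3 * dim, dim) and is cut row-wise into query/key/value matrices of
# shape (dim, dim) each; the (3 * dim,) bias splits into three (dim,) vectors.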
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False
):
    """
    Copy/paste/tweak the model's weights into the Transformers design.
    """
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
    parser.add_argument(
        "--model_name",
        default="groupvit-gcc-yfcc",
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
    )

    args = parser.parse_args()
    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
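
# Example invocation (hypothetical script name and local paths):
#   python convert_groupvit_nvlab_to_hf.py \
#       --checkpoint_path /path/to/groupvit_checkpoint.pth \
#       --pytorch_dump_folder_path ./groupvit-gcc-yfcc \
#       --model_name groupvit-gcc-yfcc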
| 689 | 1 |