code
stringlengths 86
54.5k
| code_codestyle
int64 0
371
| style_context
stringlengths 87
49.2k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
---|---|---|---|---|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCAmelCase_ = {"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwinForImageClassification""",
"""SwinForMaskedImageModeling""",
"""SwinModel""",
"""SwinPreTrainedModel""",
"""SwinBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSwinForImageClassification""",
"""TFSwinForMaskedImageModeling""",
"""TFSwinModel""",
"""TFSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 359 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
lowerCAmelCase_ = Lock()
def __magic_name__ ( A , A , A , A , A , A , A ) -> Any:
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 1_0 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(A )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
snake_case = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
snake_case = min(A , A )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(A )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
snake_case = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
snake_case = max(A , A )
# after all swaps are performed, send the values back to main
result_pipe[1].send(A )
def __magic_name__ ( A ) -> str:
snake_case = []
snake_case = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
snake_case = Pipe()
snake_case = Pipe()
process_array_.append(
Process(
target=A , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
snake_case = temp_rs
snake_case = temp_rr
for i in range(1 , len(A ) - 1 ):
snake_case = Pipe()
snake_case = Pipe()
process_array_.append(
Process(
target=A , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
snake_case = temp_rs
snake_case = temp_rr
process_array_.append(
Process(
target=A , args=(
len(A ) - 1,
arr[len(A ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(A ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(A ) ):
snake_case = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def __magic_name__ ( ) -> Tuple:
snake_case = list(range(1_0 , 0 , -1 ) )
print('Initial List' )
print(*A )
snake_case = odd_even_transposition(A )
print('Sorted List\n' )
print(*A )
if __name__ == "__main__":
main()
| 332 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowerCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase ( A_ ):
snake_case_ = ["input_features", "attention_mask"]
def __init__( self, lowercase_=80, lowercase_=16000, lowercase_=80, lowercase_=0.0, lowercase_=True, lowercase_=True, lowercase_=True, **lowercase_, ) -> int:
super().__init__(feature_size=snake_case__, sampling_rate=snake_case__, padding_value=snake_case__, **snake_case__ )
snake_case = num_mel_bins
snake_case = do_ceptral_normalize
snake_case = normalize_means
snake_case = normalize_vars
snake_case = True
def _lowerCamelCase ( self, lowercase_, ) -> np.ndarray:
snake_case = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
snake_case = torch.from_numpy(snake_case__ ).unsqueeze(0 )
snake_case = ta_kaldi.fbank(snake_case__, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def _lowerCamelCase ( lowercase_, lowercase_, lowercase_ = True, lowercase_ = True, lowercase_ = 0.0, ) -> np.ndarray:
if normalize_means:
snake_case = x[:input_length].mean(axis=0 )
snake_case = np.subtract(snake_case__, snake_case__ )
if normalize_vars:
snake_case = x[:input_length].std(axis=0 )
snake_case = np.divide(snake_case__, snake_case__ )
if input_length < x.shape[0]:
snake_case = padding_value
# make sure array is in float32
snake_case = x.astype(np.floataa )
return x
def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> List[np.ndarray]:
snake_case = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(snake_case__, snake_case__, self.normalize_means, self.normalize_vars, self.padding_value )
for x, n in zip(snake_case__, snake_case__ )
]
def __call__( self, lowercase_, lowercase_ = False, lowercase_ = None, lowercase_ = False, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = None, **lowercase_, ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
snake_case = isinstance(snake_case__, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
snake_case = is_batched_numpy or (
isinstance(snake_case__, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
snake_case = [np.asarray(snake_case__, dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(snake_case__, np.ndarray ):
snake_case = np.asarray(snake_case__, dtype=np.floataa )
elif isinstance(snake_case__, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
snake_case = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
snake_case = [raw_speech]
# extract fbank features
snake_case = [self._extract_fbank_features(snake_case__ ) for waveform in raw_speech]
# convert into correct format for padding
snake_case = BatchFeature({'input_features': features} )
snake_case = self.pad(
snake_case__, padding=snake_case__, max_length=snake_case__, truncation=snake_case__, pad_to_multiple_of=snake_case__, return_attention_mask=snake_case__, **snake_case__, )
# make sure list is in array format
snake_case = padded_inputs.get('input_features' )
if isinstance(input_features[0], snake_case__ ):
snake_case = [np.asarray(snake_case__, dtype=np.floataa ) for feature in input_features]
snake_case = padded_inputs.get('attention_mask' )
if attention_mask is not None:
snake_case = [np.asarray(snake_case__, dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
snake_case = (
np.array(snake_case__, dtype=np.intaa )
if self._get_padding_strategies(snake_case__, max_length=snake_case__ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
snake_case = self.normalize(
padded_inputs['input_features'], attention_mask=snake_case__ )
if return_tensors is not None:
snake_case = padded_inputs.convert_to_tensors(snake_case__ )
return padded_inputs
| 360 |
'''simple docstring'''
from __future__ import annotations
def __magic_name__ ( A ) -> None:
create_state_space_tree(A , [] , 0 , [0 for i in range(len(A ) )] )
def __magic_name__ ( A , A , A , A , ) -> None:
if index == len(A ):
print(A )
return
for i in range(len(A ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
snake_case = True
create_state_space_tree(A , A , index + 1 , A )
current_sequence.pop()
snake_case = False
lowerCAmelCase_ = [3, 1, 2, 4]
generate_all_permutations(sequence)
lowerCAmelCase_ = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 332 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class lowerCamelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
snake_case_ = MvpTokenizer
snake_case_ = MvpTokenizerFast
snake_case_ = True
snake_case_ = filter_roberta_detectors
def _lowerCamelCase ( self ) -> str:
super().setUp()
snake_case = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
snake_case = dict(zip(_a, range(len(_a ) ) ) )
snake_case = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
snake_case = {'unk_token': '<unk>'}
snake_case = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
snake_case = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file, 'w', encoding='utf-8' ) as fp:
fp.write(json.dumps(_a ) + '\n' )
with open(self.merges_file, 'w', encoding='utf-8' ) as fp:
fp.write('\n'.join(_a ) )
def _lowerCamelCase ( self, **lowercase_ ) -> int:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname, **_a )
def _lowerCamelCase ( self, **lowercase_ ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **_a )
def _lowerCamelCase ( self, lowercase_ ) -> Any:
return "lower newer", "lower newer"
@cached_property
def _lowerCamelCase ( self ) -> List[Any]:
return MvpTokenizer.from_pretrained('RUCAIBox/mvp' )
@cached_property
def _lowerCamelCase ( self ) -> List[str]:
return MvpTokenizerFast.from_pretrained('RUCAIBox/mvp' )
@require_torch
def _lowerCamelCase ( self ) -> List[Any]:
snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
snake_case = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case = tokenizer(_a, max_length=len(_a ), padding=_a, return_tensors='pt' )
self.assertIsInstance(_a, _a )
self.assertEqual((2, 9), batch.input_ids.shape )
self.assertEqual((2, 9), batch.attention_mask.shape )
snake_case = batch.input_ids.tolist()[0]
self.assertListEqual(_a, _a )
# Test that special tokens are reset
@require_torch
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case = tokenizer(_a, padding=_a, return_tensors='pt' )
# check if input_ids are returned and no labels
self.assertIn('input_ids', _a )
self.assertIn('attention_mask', _a )
self.assertNotIn('labels', _a )
self.assertNotIn('decoder_attention_mask', _a )
@require_torch
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case = tokenizer(text_target=_a, max_length=32, padding='max_length', return_tensors='pt' )
self.assertEqual(32, targets['input_ids'].shape[1] )
@require_torch
def _lowerCamelCase ( self ) -> List[Any]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case = tokenizer(
['I am a small frog' * 1024, 'I am a small frog'], padding=_a, truncation=_a, return_tensors='pt' )
self.assertIsInstance(_a, _a )
self.assertEqual(batch.input_ids.shape, (2, 1024) )
@require_torch
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case = ['A long paragraph for summarization.']
snake_case = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case = tokenizer(_a, text_target=_a, return_tensors='pt' )
snake_case = inputs['input_ids']
snake_case = inputs['labels']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def _lowerCamelCase ( self ) -> Dict:
pass
def _lowerCamelCase ( self ) -> Optional[int]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case = self.rust_tokenizer_class.from_pretrained(_a, **_a )
snake_case = self.tokenizer_class.from_pretrained(_a, **_a )
snake_case = 'A, <mask> AllenNLP sentence.'
snake_case = tokenizer_r.encode_plus(_a, add_special_tokens=_a, return_token_type_ids=_a )
snake_case = tokenizer_p.encode_plus(_a, add_special_tokens=_a, return_token_type_ids=_a )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ), sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ), sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ), )
snake_case = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
snake_case = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
_a, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
_a, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 361 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''roberta'''
def __init__( self, lowercase_=50265, lowercase_=768, lowercase_=12, lowercase_=12, lowercase_=3072, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=2, lowercase_=0.02, lowercase_=1E-12, lowercase_=1, lowercase_=0, lowercase_=2, lowercase_="absolute", lowercase_=True, lowercase_=None, **lowercase_, ) -> Tuple:
super().__init__(pad_token_id=lowercase_, bos_token_id=lowercase_, eos_token_id=lowercase_, **lowercase_ )
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = hidden_act
snake_case = intermediate_size
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = type_vocab_size
snake_case = initializer_range
snake_case = layer_norm_eps
snake_case = position_embedding_type
snake_case = use_cache
snake_case = classifier_dropout
class lowerCamelCase ( __lowerCAmelCase ):
@property
def _lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
snake_case = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 332 | 0 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
lowerCAmelCase_ = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
lowerCAmelCase_ = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def __magic_name__ ( A ) -> Any:
snake_case = []
for i in range(len(lowercase__ ) ):
snake_case = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
snake_case = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(lowercase__ ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(lowercase__ ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(lowercase__ ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
snake_case = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(lowercase__ )
return next_generation
def __magic_name__ ( A , A ) -> Union[str, Any]:
snake_case = []
for _ in range(lowercase__ ):
# Create output image
snake_case = Image.new('RGB' , (len(cells[0] ), len(lowercase__ )) )
snake_case = img.load()
# Save cells to image
for x in range(len(lowercase__ ) ):
for y in range(len(cells[0] ) ):
snake_case = 2_5_5 - cells[y][x] * 2_5_5
snake_case = (colour, colour, colour)
# Save image
images.append(lowercase__ )
snake_case = new_generation(lowercase__ )
return images
if __name__ == "__main__":
lowerCAmelCase_ = generate_images(GLIDER, 1_6)
images[0].save("out.gif", save_all=True, append_images=images[1:])
| 362 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowerCAmelCase_ = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
lowerCAmelCase_ = {
"allenai/led-base-16384": 1_6_3_8_4,
}
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = LEDTokenizer
snake_case_ = ['''input_ids''', '''attention_mask''']
def __init__( self, lowercase_=None, lowercase_=None, lowercase_=None, lowercase_="replace", lowercase_="<s>", lowercase_="</s>", lowercase_="</s>", lowercase_="<s>", lowercase_="<unk>", lowercase_="<pad>", lowercase_="<mask>", lowercase_=False, lowercase_=True, **lowercase_, ) -> int:
super().__init__(
lowercase_, lowercase_, tokenizer_file=lowercase_, errors=lowercase_, bos_token=lowercase_, eos_token=lowercase_, sep_token=lowercase_, cls_token=lowercase_, unk_token=lowercase_, pad_token=lowercase_, mask_token=lowercase_, add_prefix_space=lowercase_, trim_offsets=lowercase_, **lowercase_, )
snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space', lowercase_ ) != add_prefix_space:
snake_case = getattr(lowercase_, pre_tok_state.pop('type' ) )
snake_case = add_prefix_space
snake_case = pre_tok_class(**lowercase_ )
snake_case = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
snake_case = 'post_processor'
snake_case = getattr(self.backend_tokenizer, lowercase_, lowercase_ )
if tokenizer_component_instance:
snake_case = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
snake_case = tuple(state['sep'] )
if "cls" in state:
snake_case = tuple(state['cls'] )
snake_case = False
if state.get('add_prefix_space', lowercase_ ) != add_prefix_space:
snake_case = add_prefix_space
snake_case = True
if state.get('trim_offsets', lowercase_ ) != trim_offsets:
snake_case = trim_offsets
snake_case = True
if changes_to_apply:
snake_case = getattr(lowercase_, state.pop('type' ) )
snake_case = component_class(**lowercase_ )
setattr(self.backend_tokenizer, lowercase_, lowercase_ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def _lowerCamelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def _lowerCamelCase ( self, lowercase_ ) -> Any:
snake_case = AddedToken(lowercase_, lstrip=lowercase_, rstrip=lowercase_ ) if isinstance(lowercase_, lowercase_ ) else value
snake_case = value
def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding:
snake_case = kwargs.get('is_split_into_words', lowercase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*lowercase_, **lowercase_ )
def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding:
snake_case = kwargs.get('is_split_into_words', lowercase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._encode_plus(*lowercase_, **lowercase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> Tuple[str]:
snake_case = self._tokenizer.model.save(lowercase_, name=lowercase_ )
return tuple(lowercase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_=None ) -> Dict:
snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> List[int]:
snake_case = [self.sep_token_id]
snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCamelCase ( self, lowercase_, lowercase_ = None, lowercase_ = PaddingStrategy.DO_NOT_PAD, lowercase_ = None, lowercase_ = None, ) -> dict:
snake_case = super()._pad(
encoded_inputs=lowercase_, max_length=lowercase_, padding_strategy=lowercase_, pad_to_multiple_of=lowercase_, return_attention_mask=lowercase_, )
# Load from model defaults
if return_attention_mask is None:
snake_case = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
snake_case = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
snake_case = len(encoded_inputs['global_attention_mask'] ) != len(lowercase_ )
if needs_to_be_padded:
snake_case = len(lowercase_ ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
snake_case = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
snake_case = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
| 332 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"WavLMForAudioFrameClassification",
"WavLMForCTC",
"WavLMForSequenceClassification",
"WavLMForXVector",
"WavLMModel",
"WavLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 363 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __magic_name__ ( A ) -> Tuple:
snake_case = []
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
F'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
F'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
F'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
F'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def __magic_name__ ( A , A ) -> Optional[int]:
snake_case = []
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def __magic_name__ ( A ) -> List[Any]:
snake_case = []
token.append((F'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') )
return token
def __magic_name__ ( ) -> Dict:
snake_case = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def __magic_name__ ( A , A , A , A ) -> int:
snake_case = 'imagenet-1k-id2label.json'
snake_case = 1_0_0_0
snake_case = 'huggingface/label-files'
snake_case = num_labels
snake_case = json.load(open(cached_download(hf_hub_url(A , A , repo_type='dataset' ) ) , 'r' ) )
snake_case = {int(A ): v for k, v in idalabel.items()}
snake_case = idalabel
snake_case = {v: k for k, v in idalabel.items()}
snake_case = snake_case = CvtConfig(num_labels=A , idalabel=A , labelaid=A )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13":
snake_case = [1, 2, 1_0]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21":
snake_case = [1, 4, 1_6]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
else:
snake_case = [2, 2, 2_0]
snake_case = [3, 1_2, 1_6]
snake_case = [1_9_2, 7_6_8, 1_0_2_4]
snake_case = CvtForImageClassification(A )
snake_case = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
snake_case = image_size
snake_case = torch.load(A , map_location=torch.device('cpu' ) )
snake_case = OrderedDict()
snake_case = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
snake_case = list_of_state_dict + cls_token(A )
snake_case = list_of_state_dict + embeddings(A )
for cnt in range(config.depth[idx] ):
snake_case = list_of_state_dict + attention(A , A )
snake_case = list_of_state_dict + final()
for gg in list_of_state_dict:
print(A )
for i in range(len(A ) ):
snake_case = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(A )
model.save_pretrained(A )
image_processor.save_pretrained(A )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=3_8_4,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
lowerCAmelCase_ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 332 | 0 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
def __magic_name__ ( A ) -> Union[str, Any]:
snake_case = OrderedDict()
for key, value in state_dict.items():
if key.startswith('module.encoder' ):
snake_case = key.replace('module.encoder' , 'glpn.encoder' )
if key.startswith('module.decoder' ):
snake_case = key.replace('module.decoder' , 'decoder.stages' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
snake_case = key[key.find('patch_embed' ) + len('patch_embed' )]
snake_case = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(a_ )-1}''' )
if "norm" in key:
snake_case = key.replace('norm' , 'layer_norm' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
snake_case = key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )]
snake_case = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(a_ )-1}''' )
if "layer_norm1" in key:
snake_case = key.replace('layer_norm1' , 'layer_norm_1' )
if "layer_norm2" in key:
snake_case = key.replace('layer_norm2' , 'layer_norm_2' )
if "block" in key:
# replace for example block1 by block.0
snake_case = key[key.find('block' ) + len('block' )]
snake_case = key.replace(F'''block{idx}''' , F'''block.{int(a_ )-1}''' )
if "attn.q" in key:
snake_case = key.replace('attn.q' , 'attention.self.query' )
if "attn.proj" in key:
snake_case = key.replace('attn.proj' , 'attention.output.dense' )
if "attn" in key:
snake_case = key.replace('attn' , 'attention.self' )
if "fc1" in key:
snake_case = key.replace('fc1' , 'dense1' )
if "fc2" in key:
snake_case = key.replace('fc2' , 'dense2' )
if "linear_pred" in key:
snake_case = key.replace('linear_pred' , 'classifier' )
if "linear_fuse" in key:
snake_case = key.replace('linear_fuse.conv' , 'linear_fuse' )
snake_case = key.replace('linear_fuse.bn' , 'batch_norm' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
snake_case = key[key.find('linear_c' ) + len('linear_c' )]
snake_case = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(a_ )-1}''' )
if "bot_conv" in key:
snake_case = key.replace('bot_conv' , '0.convolution' )
if "skip_conv1" in key:
snake_case = key.replace('skip_conv1' , '1.convolution' )
if "skip_conv2" in key:
snake_case = key.replace('skip_conv2' , '2.convolution' )
if "fusion1" in key:
snake_case = key.replace('fusion1' , '1.fusion' )
if "fusion2" in key:
snake_case = key.replace('fusion2' , '2.fusion' )
if "fusion3" in key:
snake_case = key.replace('fusion3' , '3.fusion' )
if "fusion" in key and "conv" in key:
snake_case = key.replace('conv' , 'convolutional_layer' )
if key.startswith('module.last_layer_depth' ):
snake_case = key.replace('module.last_layer_depth' , 'head.head' )
snake_case = value
return new_state_dict
def __magic_name__ ( A , A ) -> str:
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
snake_case = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' )
snake_case = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' )
# next, add keys and values (in that order) to the state dict
snake_case = kv_weight[
: config.hidden_sizes[i], :
]
snake_case = kv_bias[: config.hidden_sizes[i]]
snake_case = kv_weight[
config.hidden_sizes[i] :, :
]
snake_case = kv_bias[config.hidden_sizes[i] :]
def __magic_name__ ( ) -> Any:
snake_case = 'http://images.cocodataset.org/val2017/000000039769.jpg'
snake_case = Image.open(requests.get(a_ , stream=a_ ).raw )
return image
@torch.no_grad()
def __magic_name__ ( A , A , A=False , A=None ) -> Optional[int]:
snake_case = GLPNConfig(hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2] , decoder_hidden_size=6_4 , depths=[3, 8, 2_7, 3] )
# load image processor (only resize + rescale)
snake_case = GLPNImageProcessor()
# prepare image
snake_case = prepare_img()
snake_case = image_processor(images=a_ , return_tensors='pt' ).pixel_values
logger.info('Converting model...' )
# load original state dict
snake_case = torch.load(a_ , map_location=torch.device('cpu' ) )
# rename keys
snake_case = rename_keys(a_ )
# key and value matrices need special treatment
read_in_k_v(a_ , a_ )
# create HuggingFace model and load state dict
snake_case = GLPNForDepthEstimation(a_ )
model.load_state_dict(a_ )
model.eval()
# forward pass
snake_case = model(a_ )
snake_case = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
snake_case = torch.tensor(
[[4.4_147, 4.0_873, 4.0_673], [3.7_890, 3.2_881, 3.1_525], [3.7_674, 3.5_423, 3.4_913]] )
elif "kitti" in model_name:
snake_case = torch.tensor(
[[3.4_291, 2.7_865, 2.5_151], [3.2_841, 2.7_021, 2.3_502], [3.1_147, 2.4_625, 2.2_481]] )
else:
raise ValueError(F'''Unknown model name: {model_name}''' )
snake_case = torch.Size([1, 4_8_0, 6_4_0] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , a_ , atol=1E-4 )
print('Looks ok!' )
# finally, push to hub if required
if push_to_hub:
logger.info('Pushing model and image processor to the hub...' )
model.push_to_hub(
repo_path_or_name=Path(a_ , a_ ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=a_ , )
image_processor.push_to_hub(
repo_path_or_name=Path(a_ , a_ ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=a_ , )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path",
default=None,
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
parser.add_argument(
"--model_name",
default="glpn-kitti",
type=str,
help="Name of the model in case you're pushing to the hub.",
)
lowerCAmelCase_ = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 364 |
'''simple docstring'''
from pathlib import Path
import fire
def __magic_name__ ( A , A , A ) -> Union[str, Any]:
snake_case = Path(A )
snake_case = Path(A )
dest_dir.mkdir(exist_ok=A )
for path in src_dir.iterdir():
snake_case = [x.rstrip() for x in list(path.open().readlines() )][:n]
snake_case = dest_dir.joinpath(path.name )
print(A )
dest_path.open('w' ).write('\n'.join(A ) )
if __name__ == "__main__":
fire.Fire(minify)
| 332 | 0 |
'''simple docstring'''
def __magic_name__ ( A , A ) -> int:
return int((input_a, input_a).count(0 ) == 0 )
def __magic_name__ ( ) -> None:
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 365 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
lowerCAmelCase_ = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def __magic_name__ ( A , A ) -> Union[str, Any]:
inspect_dataset(A , A )
snake_case = path + '.py'
assert script_name in os.listdir(A )
assert "__pycache__" not in os.listdir(A )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def __magic_name__ ( A , A ) -> int:
inspect_metric(A , A )
snake_case = path + '.py'
assert script_name in os.listdir(A )
assert "__pycache__" not in os.listdir(A )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def __magic_name__ ( A , A , A ) -> List[str]:
snake_case = get_dataset_config_info(A , config_name=A )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def __magic_name__ ( A , A , A ) -> Any:
with pytest.raises(A ):
get_dataset_config_info(A , config_name=A )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def __magic_name__ ( A , A ) -> Dict:
snake_case = get_dataset_config_names(A )
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def __magic_name__ ( A , A , A ) -> List[str]:
snake_case = get_dataset_infos(A )
assert list(infos.keys() ) == expected_configs
snake_case = expected_configs[0]
assert expected_config in infos
snake_case = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def __magic_name__ ( A , A , A ) -> Any:
snake_case = get_dataset_infos(A )
assert expected_config in infos
snake_case = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def __magic_name__ ( A , A , A ) -> int:
with pytest.raises(A ):
get_dataset_split_names(A , config_name=A )
| 332 | 0 |
'''simple docstring'''
import os
def __magic_name__ ( ) -> Optional[int]:
with open(os.path.dirname(lowerCAmelCase__ ) + '/p022_names.txt' ) as file:
snake_case = str(file.readlines()[0] )
snake_case = names.replace('\"' , '' ).split(',' )
names.sort()
snake_case = 0
snake_case = 0
for i, name in enumerate(lowerCAmelCase__ ):
for letter in name:
name_score += ord(lowerCAmelCase__ ) - 6_4
total_score += (i + 1) * name_score
snake_case = 0
return total_score
if __name__ == "__main__":
print(solution())
| 366 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 332 | 0 |
'''simple docstring'''
import math
lowerCAmelCase_ = 1_0
lowerCAmelCase_ = 7
lowerCAmelCase_ = BALLS_PER_COLOUR * NUM_COLOURS
def __magic_name__ ( A = 2_0 ) -> str:
snake_case = math.comb(a_ , a_ )
snake_case = math.comb(NUM_BALLS - BALLS_PER_COLOUR , a_ )
snake_case = NUM_COLOURS * (1 - missing_colour / total)
return F'''{result:.9f}'''
if __name__ == "__main__":
print(solution(2_0))
| 367 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowerCAmelCase_ = False
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowerCamelCase ( self ) -> List[Any]:
return 12
@property
def _lowerCamelCase ( self ) -> Dict:
return 12
@property
def _lowerCamelCase ( self ) -> List[Any]:
return 32
@property
def _lowerCamelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
snake_case = VQModel(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3, )
return model
@property
def _lowerCamelCase ( self ) -> List[Any]:
snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _lowerCamelCase ( self ) -> Tuple:
torch.manual_seed(0 )
snake_case = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
return CLIPTextModel(lowercase_ )
@property
def _lowerCamelCase ( self ) -> str:
torch.manual_seed(0 )
snake_case = 12
snake_case = 12
snake_case = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
snake_case = TransformeraDModel(**lowercase_ )
return model
def _lowerCamelCase ( self ) -> Tuple:
snake_case = 'cpu'
snake_case = self.dummy_vqvae
snake_case = self.dummy_text_encoder
snake_case = self.dummy_tokenizer
snake_case = self.dummy_transformer
snake_case = VQDiffusionScheduler(self.num_embed )
snake_case = LearnedClassifierFreeSamplingEmbeddings(learnable=lowercase_ )
snake_case = VQDiffusionPipeline(
vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, )
snake_case = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case = 'teddy bear playing in the pool'
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' )
snake_case = output.images
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe(
[prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case = 'cpu'
snake_case = self.dummy_vqvae
snake_case = self.dummy_text_encoder
snake_case = self.dummy_tokenizer
snake_case = self.dummy_transformer
snake_case = VQDiffusionScheduler(self.num_embed )
snake_case = LearnedClassifierFreeSamplingEmbeddings(
learnable=lowercase_, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length )
snake_case = VQDiffusionPipeline(
vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, )
snake_case = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case = 'teddy bear playing in the pool'
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' )
snake_case = output.images
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe(
[prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self ) -> str:
snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
snake_case = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
snake_case = pipeline.to(lowercase_ )
pipeline.set_progress_bar_config(disable=lowercase_ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipeline(
'teddy bear playing in the pool', num_images_per_prompt=1, generator=lowercase_, output_type='np', )
snake_case = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 332 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase_ = {
'configuration_chinese_clip': [
'CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ChineseCLIPConfig',
'ChineseCLIPOnnxConfig',
'ChineseCLIPTextConfig',
'ChineseCLIPVisionConfig',
],
'processing_chinese_clip': ['ChineseCLIPProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['ChineseCLIPFeatureExtractor']
lowerCAmelCase_ = ['ChineseCLIPImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ChineseCLIPModel',
'ChineseCLIPPreTrainedModel',
'ChineseCLIPTextModel',
'ChineseCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 368 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase ( metaclass=__lowerCAmelCase ):
snake_case_ = ['''note_seq''']
def __init__( self, *lowercase_, **lowercase_ ) -> str:
requires_backends(self, ['note_seq'] )
@classmethod
def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
requires_backends(cls, ['note_seq'] )
@classmethod
def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
requires_backends(cls, ['note_seq'] )
| 332 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
lowerCAmelCase_ = '''https://www.indeed.co.in/jobs?q=mobile+app+development&l='''
def __magic_name__ ( A = "mumbai" ) -> Optional[Any]:
snake_case = BeautifulSoup(requests.get(url + location ).content , 'html.parser' )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('div' , attrs={'data-tn-component': 'organicJob'} ):
snake_case = job.find('a' , attrs={'data-tn-element': 'jobTitle'} ).text.strip()
snake_case = job.find('span' , {'class': 'company'} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(f"Job {i:>2} is {job[0]} at {job[1]}")
| 369 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase ( __lowerCAmelCase ):
def __init__( self, *lowercase_, **lowercase_ ) -> None:
warnings.warn(
'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DPTImageProcessor instead.', lowercase_, )
super().__init__(*lowercase_, **lowercase_ )
| 332 | 0 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def __magic_name__ ( A , A=0.999 , A="cosine" , ) -> Union[str, Any]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(A ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(A ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
snake_case = []
for i in range(__SCREAMING_SNAKE_CASE ):
snake_case = i / num_diffusion_timesteps
snake_case = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__SCREAMING_SNAKE_CASE ) / alpha_bar_fn(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) )
return torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.floataa )
class lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ):
snake_case_ = [e.name for e in KarrasDiffusionSchedulers]
snake_case_ = 2
@register_to_config
def __init__( self, lowercase_ = 1000, lowercase_ = 0.00_085, lowercase_ = 0.012, lowercase_ = "linear", lowercase_ = None, lowercase_ = "epsilon", lowercase_ = False, lowercase_ = False, lowercase_ = 1.0, lowercase_ = "linspace", lowercase_ = 0, ) -> Any:
if trained_betas is not None:
snake_case = torch.tensor(_lowerCAmelCase, dtype=torch.floataa )
elif beta_schedule == "linear":
snake_case = torch.linspace(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
snake_case = (
torch.linspace(beta_start**0.5, beta_end**0.5, _lowerCAmelCase, dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
snake_case = betas_for_alpha_bar(_lowerCAmelCase, alpha_transform_type='cosine' )
elif beta_schedule == "exp":
snake_case = betas_for_alpha_bar(_lowerCAmelCase, alpha_transform_type='exp' )
else:
raise NotImplementedError(F'''{beta_schedule} does is not implemented for {self.__class__}''' )
snake_case = 1.0 - self.betas
snake_case = torch.cumprod(self.alphas, dim=0 )
# set all values
self.set_timesteps(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
snake_case = use_karras_sigmas
def _lowerCamelCase ( self, lowercase_, lowercase_=None ) -> Dict:
if schedule_timesteps is None:
snake_case = self.timesteps
snake_case = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
snake_case = 1 if len(_lowerCAmelCase ) > 1 else 0
else:
snake_case = timestep.cpu().item() if torch.is_tensor(_lowerCAmelCase ) else timestep
snake_case = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _lowerCamelCase ( self ) -> Tuple:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _lowerCamelCase ( self, lowercase_, lowercase_, ) -> Optional[Any]:
snake_case = self.index_for_timestep(_lowerCAmelCase )
snake_case = self.sigmas[step_index]
snake_case = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _lowerCamelCase ( self, lowercase_, lowercase_ = None, lowercase_ = None, ) -> Union[str, Any]:
snake_case = num_inference_steps
snake_case = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
snake_case = np.linspace(0, num_train_timesteps - 1, _lowerCAmelCase, dtype=_lowerCAmelCase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
snake_case = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
snake_case = (np.arange(0, _lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(_lowerCAmelCase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
snake_case = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
snake_case = (np.arange(_lowerCAmelCase, 0, -step_ratio )).round().copy().astype(_lowerCAmelCase )
timesteps -= 1
else:
raise ValueError(
F'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
snake_case = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
snake_case = np.log(_lowerCAmelCase )
snake_case = np.interp(_lowerCAmelCase, np.arange(0, len(_lowerCAmelCase ) ), _lowerCAmelCase )
if self.config.use_karras_sigmas:
snake_case = self._convert_to_karras(in_sigmas=_lowerCAmelCase, num_inference_steps=self.num_inference_steps )
snake_case = np.array([self._sigma_to_t(_lowerCAmelCase, _lowerCAmelCase ) for sigma in sigmas] )
snake_case = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
snake_case = torch.from_numpy(_lowerCAmelCase ).to(device=_lowerCAmelCase )
snake_case = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
snake_case = torch.from_numpy(_lowerCAmelCase )
snake_case = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(_lowerCAmelCase ).startswith('mps' ):
# mps does not support float64
snake_case = timesteps.to(_lowerCAmelCase, dtype=torch.floataa )
else:
snake_case = timesteps.to(device=_lowerCAmelCase )
# empty dt and derivative
snake_case = None
snake_case = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
snake_case = defaultdict(_lowerCAmelCase )
def _lowerCamelCase ( self, lowercase_, lowercase_ ) -> Optional[int]:
# get log sigma
snake_case = np.log(_lowerCAmelCase )
# get distribution
snake_case = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
snake_case = np.cumsum((dists >= 0), axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
snake_case = low_idx + 1
snake_case = log_sigmas[low_idx]
snake_case = log_sigmas[high_idx]
# interpolate sigmas
snake_case = (low - log_sigma) / (low - high)
snake_case = np.clip(_lowerCAmelCase, 0, 1 )
# transform interpolation to time range
snake_case = (1 - w) * low_idx + w * high_idx
snake_case = t.reshape(sigma.shape )
return t
def _lowerCamelCase ( self, lowercase_, lowercase_ ) -> Union[str, Any]:
snake_case = in_sigmas[-1].item()
snake_case = in_sigmas[0].item()
snake_case = 7.0 # 7.0 is the value used in the paper
snake_case = np.linspace(0, 1, _lowerCAmelCase )
snake_case = sigma_min ** (1 / rho)
snake_case = sigma_max ** (1 / rho)
snake_case = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def _lowerCamelCase ( self ) -> Any:
return self.dt is None
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_ = True, ) -> Optional[Any]:
snake_case = self.index_for_timestep(_lowerCAmelCase )
# advance index counter by 1
snake_case = timestep.cpu().item() if torch.is_tensor(_lowerCAmelCase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
snake_case = self.sigmas[step_index]
snake_case = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
snake_case = self.sigmas[step_index - 1]
snake_case = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
snake_case = 0
snake_case = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
snake_case = sigma_hat if self.state_in_first_order else sigma_next
snake_case = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
snake_case = sigma_hat if self.state_in_first_order else sigma_next
snake_case = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
snake_case = model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.config.clip_sample:
snake_case = pred_original_sample.clamp(
-self.config.clip_sample_range, self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
snake_case = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
snake_case = sigma_next - sigma_hat
# store for 2nd order step
snake_case = derivative
snake_case = dt
snake_case = sample
else:
# 2. 2nd order / Heun's method
snake_case = (sample - pred_original_sample) / sigma_next
snake_case = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
snake_case = self.dt
snake_case = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
snake_case = None
snake_case = None
snake_case = None
snake_case = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_lowerCAmelCase )
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, ) -> Optional[int]:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
snake_case = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_lowerCAmelCase ):
# mps does not support float64
snake_case = self.timesteps.to(original_samples.device, dtype=torch.floataa )
snake_case = timesteps.to(original_samples.device, dtype=torch.floataa )
else:
snake_case = self.timesteps.to(original_samples.device )
snake_case = timesteps.to(original_samples.device )
snake_case = [self.index_for_timestep(_lowerCAmelCase, _lowerCAmelCase ) for t in timesteps]
snake_case = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
snake_case = sigma.unsqueeze(-1 )
snake_case = original_samples + noise * sigma
return noisy_samples
def __len__( self ) -> Tuple:
return self.config.num_train_timesteps
| 370 |
'''simple docstring'''
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
lowerCAmelCase_ = pd.read_csv(
"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
"position_salaries.csv"
)
lowerCAmelCase_ = dataset.iloc[:, 1:2].values
lowerCAmelCase_ = dataset.iloc[:, 2].values
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = train_test_split(X, y, test_size=0.2, random_state=0)
lowerCAmelCase_ = PolynomialFeatures(degree=4)
lowerCAmelCase_ = poly_reg.fit_transform(X)
lowerCAmelCase_ = LinearRegression()
pol_reg.fit(X_poly, y)
def __magic_name__ ( ) -> Any:
plt.scatter(A , A , color='red' )
plt.plot(A , pol_reg.predict(poly_reg.fit_transform(A ) ) , color='blue' )
plt.title('Truth or Bluff (Linear Regression)' )
plt.xlabel('Position level' )
plt.ylabel('Salary' )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 332 | 0 |
import mpmath # for roots of unity
import numpy as np
class lowerCamelCase :
def __init__( self, lowercase_=None, lowercase_=None ) -> int:
# Input as list
snake_case = list(poly_a or [0] )[:]
snake_case = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
snake_case = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
snake_case = len(self.polyB )
# Add 0 to make lengths equal a power of 2
snake_case = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
snake_case = complex(mpmath.root(x=1, n=self.c_max_length, k=1 ) )
# The product
snake_case = self.__multiply()
def _lowerCamelCase ( self, lowercase_ ) -> Optional[Any]:
snake_case = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB]
# Corner case
if len(lowercase_ ) <= 1:
return dft[0]
#
snake_case = self.c_max_length // 2
while next_ncol > 0:
snake_case = [[] for i in range(lowercase_ )]
snake_case = self.root**next_ncol
# First half of next step
snake_case = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(lowercase_ ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
snake_case = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(lowercase_ ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
snake_case = new_dft
snake_case = next_ncol // 2
return dft[0]
def _lowerCamelCase ( self ) -> Optional[int]:
snake_case = self.__dft('A' )
snake_case = self.__dft('B' )
snake_case = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
snake_case = 2
while next_ncol <= self.c_max_length:
snake_case = [[] for i in range(lowercase_ )]
snake_case = self.root ** (next_ncol // 2)
snake_case = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
snake_case = new_inverse_c
next_ncol *= 2
# Unpack
snake_case = [round(x[0].real, 8 ) + round(x[0].imag, 8 ) * 1J for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self ) -> Optional[Any]:
snake_case = """A = """ + """ + """.join(
F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) )
snake_case = """B = """ + """ + """.join(
F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) )
snake_case = """A*B = """ + """ + """.join(
F'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) )
return F'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 371 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = ''''''
snake_case_ = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
snake_case_ = None # compression type in fsspec. ex: "gzip"
snake_case_ = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self, lowercase_ = "", lowercase_ = None, lowercase_ = None, **lowercase_ ) -> str:
super().__init__(self, **lowercase_ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
snake_case = fsspec.open(
lowercase_, mode='rb', protocol=lowercase_, compression=self.compression, client_kwargs={
'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459
'trust_env': True, # Enable reading proxy env variables.
**(target_options or {}).pop('client_kwargs', {} ), # To avoid issues if it was already passed.
}, **(target_options or {}), )
snake_case = os.path.basename(self.file.path.split('::' )[0] )
snake_case = (
self.compressed_name[: self.compressed_name.rindex('.' )]
if '.' in self.compressed_name
else self.compressed_name
)
snake_case = None
@classmethod
def _lowerCamelCase ( cls, lowercase_ ) -> Any:
# compressed file paths are always relative to the archive root
return super()._strip_protocol(lowercase_ ).lstrip('/' )
def _lowerCamelCase ( self ) -> Optional[Any]:
if self.dir_cache is None:
snake_case = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name}
snake_case = {f['name']: f}
def _lowerCamelCase ( self, lowercase_ ) -> str:
return self.file.open().read()
def _lowerCamelCase ( self, lowercase_, lowercase_ = "rb", lowercase_=None, lowercase_=True, lowercase_=None, **lowercase_, ) -> Any:
snake_case = self._strip_protocol(lowercase_ )
if mode != "rb":
raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
return self.file.open()
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''bz2'''
snake_case_ = '''bz2'''
snake_case_ = '''.bz2'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''gzip'''
snake_case_ = '''gzip'''
snake_case_ = '''.gz'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''lz4'''
snake_case_ = '''lz4'''
snake_case_ = '''.lz4'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''xz'''
snake_case_ = '''xz'''
snake_case_ = '''.xz'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''zstd'''
snake_case_ = '''zstd'''
snake_case_ = '''.zst'''
def __init__( self, lowercase_, lowercase_ = "rb", lowercase_ = None, lowercase_ = None, lowercase_ = DEFAULT_BLOCK_SIZE, **lowercase_, ) -> Union[str, Any]:
super().__init__(
fo=lowercase_, mode=lowercase_, target_protocol=lowercase_, target_options=lowercase_, block_size=lowercase_, **lowercase_, )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
snake_case = self.file.__enter__
class lowerCamelCase :
def __init__( self, lowercase_ ) -> List[Any]:
snake_case = file_
def __enter__( self ) -> Dict:
self._file.__enter__()
return self
def __exit__( self, *lowercase_, **lowercase_ ) -> Dict:
self._file.__exit__(*lowercase_, **lowercase_ )
def __iter__( self ) -> List[str]:
return iter(self._file )
def _lowerCamelCase ( self ) -> List[str]:
return next(self._file )
def __getattr__( self, lowercase_ ) -> List[Any]:
return getattr(self._file, lowercase_ )
def fixed_enter(*lowercase_, **lowercase_ ):
return WrappedFile(_enter(*lowercase_, **lowercase_ ) )
snake_case = fixed_enter
| 332 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase ( __lowercase , unittest.TestCase ):
snake_case_ = CanineTokenizer
snake_case_ = False
def _lowerCamelCase ( self ) -> Optional[int]:
super().setUp()
snake_case = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowerCamelCase ( self ) -> int:
return CanineTokenizer.from_pretrained('google/canine-s' )
def _lowerCamelCase ( self, **lowercase_ ) -> CanineTokenizer:
snake_case = self.tokenizer_class.from_pretrained(self.tmpdirname, **_a )
snake_case = 1024
return tokenizer
@require_torch
def _lowerCamelCase ( self ) -> List[str]:
snake_case = self.canine_tokenizer
snake_case = ['Life is like a box of chocolates.', 'You never know what you\'re gonna get.']
# fmt: off
snake_case = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
# fmt: on
snake_case = tokenizer(_a, padding=_a, return_tensors='pt' )
self.assertIsInstance(_a, _a )
snake_case = list(batch.input_ids.numpy()[0] )
self.assertListEqual(_a, _a )
self.assertEqual((2, 39), batch.input_ids.shape )
self.assertEqual((2, 39), batch.attention_mask.shape )
@require_torch
def _lowerCamelCase ( self ) -> str:
snake_case = self.canine_tokenizer
snake_case = ['Once there was a man.', 'He wrote a test in HuggingFace Tranformers.']
snake_case = tokenizer(_a, padding=_a, return_tensors='pt' )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn('input_ids', _a )
self.assertIn('attention_mask', _a )
self.assertIn('token_type_ids', _a )
@require_torch
def _lowerCamelCase ( self ) -> Union[str, Any]:
snake_case = self.canine_tokenizer
snake_case = [
'What\'s the weater?',
'It\'s about 25 degrees.',
]
snake_case = tokenizer(
text_target=_a, max_length=32, padding='max_length', truncation=_a, return_tensors='pt' )
self.assertEqual(32, targets['input_ids'].shape[1] )
def _lowerCamelCase ( self ) -> Dict:
# safety check on max_len default value so we are sure the test works
snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length, 42 )
# Now let's start the test
snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
snake_case = tempfile.mkdtemp()
snake_case = ' He is very happy, UNwant\u00E9d,running'
snake_case = tokenizer.encode(_a, add_special_tokens=_a )
tokenizer.save_pretrained(_a )
snake_case = tokenizer.__class__.from_pretrained(_a )
snake_case = after_tokenizer.encode(_a, add_special_tokens=_a )
self.assertListEqual(_a, _a )
shutil.rmtree(_a )
snake_case = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
snake_case = tempfile.mkdtemp()
snake_case = ' He is very happy, UNwant\u00E9d,running'
snake_case = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
snake_case = chr(0Xe007 )
additional_special_tokens.append(_a )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
snake_case = tokenizer.encode(_a, add_special_tokens=_a )
tokenizer.save_pretrained(_a )
snake_case = tokenizer.__class__.from_pretrained(_a )
snake_case = after_tokenizer.encode(_a, add_special_tokens=_a )
self.assertListEqual(_a, _a )
self.assertIn(_a, after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 42 )
snake_case = tokenizer.__class__.from_pretrained(_a, model_max_length=43 )
self.assertEqual(tokenizer.model_max_length, 43 )
shutil.rmtree(_a )
def _lowerCamelCase ( self ) -> int:
snake_case = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case , snake_case = self.get_clean_sequence(_a )
# a special token for Canine can be defined as follows:
snake_case = 0Xe005
snake_case = chr(_a )
tokenizer.add_special_tokens({'cls_token': special_token} )
snake_case = tokenizer.encode(_a, add_special_tokens=_a )
self.assertEqual(len(_a ), 1 )
snake_case = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=_a )
snake_case = tokenizer.encode(_a, add_special_tokens=_a )
snake_case = tokenizer.encode(_a, add_special_tokens=_a )
snake_case = tokenizer.encode(_a, add_special_tokens=_a )
self.assertEqual(_a, input_encoded + special_token_id )
snake_case = tokenizer.decode(_a, skip_special_tokens=_a )
self.assertTrue(special_token not in decoded )
def _lowerCamelCase ( self ) -> Optional[int]:
snake_case = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case = chr(0Xe005 )
snake_case = chr(0Xe006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=_a )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({'additional_special_tokens': [SPECIAL_TOKEN_2]} )
snake_case = tokenizer.tokenize(_a )
snake_case = tokenizer.tokenize(_a )
self.assertEqual(len(_a ), 1 )
self.assertEqual(len(_a ), 1 )
self.assertEqual(token_a[0], _a )
self.assertEqual(token_a[0], _a )
@require_tokenizers
def _lowerCamelCase ( self ) -> List[str]:
snake_case = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
snake_case = 0Xe006
snake_case = chr(_a )
snake_case = AddedToken(_a, lstrip=_a )
tokenizer.add_special_tokens({'additional_special_tokens': [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(_a )
tokenizer.from_pretrained(_a )
def _lowerCamelCase ( self ) -> Optional[int]:
snake_case = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_a )
with open(os.path.join(_a, 'special_tokens_map.json' ), encoding='utf-8' ) as json_file:
snake_case = json.load(_a )
with open(os.path.join(_a, 'tokenizer_config.json' ), encoding='utf-8' ) as json_file:
snake_case = json.load(_a )
# a special token for Canine can be defined as follows:
snake_case = 0Xe006
snake_case = chr(_a )
snake_case = [new_token_a]
snake_case = [new_token_a]
with open(os.path.join(_a, 'special_tokens_map.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(_a, _a )
with open(os.path.join(_a, 'tokenizer_config.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(_a, _a )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
snake_case = tokenizer_class.from_pretrained(_a, extra_ids=0 )
self.assertIn(_a, tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ), )
snake_case = 0Xe007
snake_case = chr(_a )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
snake_case = [AddedToken(_a, lstrip=_a )]
snake_case = tokenizer_class.from_pretrained(
_a, additional_special_tokens=_a, extra_ids=0 )
self.assertIn(_a, tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def _lowerCamelCase ( self ) -> List[str]:
snake_case = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case = 'hello world'
if self.space_between_special_tokens:
snake_case = '[CLS] hello world [SEP]'
else:
snake_case = input
snake_case = tokenizer.encode(_a, add_special_tokens=_a )
snake_case = tokenizer.decode(_a, spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(_a, [output, output.lower()] )
def _lowerCamelCase ( self ) -> List[Any]:
snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
snake_case = 'a'
snake_case = ord(_a )
for attr in attributes_list:
setattr(_a, attr + '_id', _a )
self.assertEqual(getattr(_a, _a ), _a )
self.assertEqual(getattr(_a, attr + '_id' ), _a )
setattr(_a, attr + '_id', _a )
self.assertEqual(getattr(_a, _a ), _a )
self.assertEqual(getattr(_a, attr + '_id' ), _a )
setattr(_a, 'additional_special_tokens_ids', [] )
self.assertListEqual(getattr(_a, 'additional_special_tokens' ), [] )
self.assertListEqual(getattr(_a, 'additional_special_tokens_ids' ), [] )
snake_case = 0Xe006
snake_case = chr(_a )
setattr(_a, 'additional_special_tokens_ids', [additional_special_token_id] )
self.assertListEqual(getattr(_a, 'additional_special_tokens' ), [additional_special_token] )
self.assertListEqual(getattr(_a, 'additional_special_tokens_ids' ), [additional_special_token_id] )
def _lowerCamelCase ( self ) -> Any:
pass
def _lowerCamelCase ( self ) -> Optional[Any]:
pass
def _lowerCamelCase ( self ) -> List[Any]:
pass
def _lowerCamelCase ( self ) -> Tuple:
pass
def _lowerCamelCase ( self ) -> int:
pass
def _lowerCamelCase ( self ) -> Any:
pass
def _lowerCamelCase ( self ) -> Any:
pass
def _lowerCamelCase ( self ) -> List[str]:
pass
| 350 |
'''simple docstring'''
from __future__ import annotations
def __magic_name__ ( A , A , A ) -> int | float:
if len(A ) == 0:
raise ValueError('find_max() arg is an empty sequence' )
if (
left >= len(A )
or left < -len(A )
or right >= len(A )
or right < -len(A )
):
raise IndexError('list index out of range' )
if left == right:
return nums[left]
snake_case = (left + right) >> 1 # the middle
snake_case = find_max(A , A , A ) # find max in range[left, mid]
snake_case = find_max(A , mid + 1 , A ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 332 | 0 |
'''simple docstring'''
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
lowerCAmelCase_ = {
"E": 12.70,
"T": 9.06,
"A": 8.17,
"O": 7.51,
"I": 6.97,
"N": 6.75,
"S": 6.33,
"H": 6.09,
"R": 5.99,
"D": 4.25,
"L": 4.03,
"C": 2.78,
"U": 2.76,
"M": 2.41,
"W": 2.36,
"F": 2.23,
"G": 2.02,
"Y": 1.97,
"P": 1.93,
"B": 1.29,
"V": 0.98,
"K": 0.77,
"J": 0.15,
"X": 0.15,
"Q": 0.10,
"Z": 0.07,
}
lowerCAmelCase_ = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
lowerCAmelCase_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def __magic_name__ ( A ) -> Optional[Any]:
snake_case = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def __magic_name__ ( A ) -> int:
return x[0]
def __magic_name__ ( A ) -> List[Any]:
snake_case = get_letter_count(__lowerCamelCase )
snake_case = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(__lowerCamelCase )
snake_case = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find , reverse=__lowerCamelCase )
snake_case = "".join(freq_to_letter[freq] )
snake_case = list(freq_to_letter_str.items() )
freq_pairs.sort(key=__lowerCamelCase , reverse=__lowerCamelCase )
snake_case = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(__lowerCamelCase )
def __magic_name__ ( A ) -> Tuple:
snake_case = get_frequency_order(__lowerCamelCase )
snake_case = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
| 351 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = 42
class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase ):
@register_to_config
def __init__( self, lowercase_ = 3, lowercase_ = 3, lowercase_ = ("DownEncoderBlock2D",), lowercase_ = ("UpDecoderBlock2D",), lowercase_ = (64,), lowercase_ = 1, lowercase_ = "silu", lowercase_ = 3, lowercase_ = 32, lowercase_ = 256, lowercase_ = 32, lowercase_ = None, lowercase_ = 0.18_215, lowercase_ = "group", ) -> str:
super().__init__()
# pass init params to Encoder
snake_case = Encoder(
in_channels=lowercase_, out_channels=lowercase_, down_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, double_z=lowercase_, )
snake_case = vq_embed_dim if vq_embed_dim is not None else latent_channels
snake_case = nn.Convad(lowercase_, lowercase_, 1 )
snake_case = VectorQuantizer(lowercase_, lowercase_, beta=0.25, remap=lowercase_, sane_index_shape=lowercase_ )
snake_case = nn.Convad(lowercase_, lowercase_, 1 )
# pass init params to Decoder
snake_case = Decoder(
in_channels=lowercase_, out_channels=lowercase_, up_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, norm_type=lowercase_, )
@apply_forward_hook
def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> VQEncoderOutput:
snake_case = self.encoder(lowercase_ )
snake_case = self.quant_conv(lowercase_ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowercase_ )
@apply_forward_hook
def _lowerCamelCase ( self, lowercase_, lowercase_ = False, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
# also go through quantization layer
if not force_not_quantize:
snake_case , snake_case , snake_case = self.quantize(lowercase_ )
else:
snake_case = h
snake_case = self.post_quant_conv(lowercase_ )
snake_case = self.decoder(lowercase_, quant if self.config.norm_type == 'spatial' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
snake_case = sample
snake_case = self.encode(lowercase_ ).latents
snake_case = self.decode(lowercase_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase_ )
| 332 | 0 |
'''simple docstring'''
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
lowerCAmelCase_ = [
"""EAGER""",
"""AOT_EAGER""",
"""INDUCTOR""",
"""NVFUSER""",
"""AOT_NVFUSER""",
"""AOT_CUDAGRAPHS""",
"""OFI""",
"""FX2TRT""",
"""ONNXRT""",
"""IPEX""",
]
def __magic_name__ ( A , A=None , A=None , A=None ) -> str:
snake_case = True
while ask_again:
snake_case = input(_snake_case )
try:
if default is not None and len(_snake_case ) == 0:
return default
return convert_value(_snake_case ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(_snake_case )
def __magic_name__ ( A , A=[] , A=None , A=0 ) -> str:
snake_case = BulletMenu(_snake_case , _snake_case )
snake_case = menu.run(default_choice=_snake_case )
return convert_value(_snake_case ) if convert_value is not None else result
def __magic_name__ ( A ) -> List[str]:
snake_case = int(_snake_case )
return ComputeEnvironment(['LOCAL_MACHINE', 'AMAZON_SAGEMAKER'][value] )
def __magic_name__ ( A ) -> Optional[int]:
snake_case = int(_snake_case )
return DistributedType(['NO', 'MULTI_CPU', 'MULTI_XPU', 'MULTI_GPU', 'MULTI_NPU', 'TPU'][value] )
def __magic_name__ ( A ) -> Tuple:
snake_case = int(_snake_case )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def __magic_name__ ( A ) -> List[Any]:
snake_case = int(_snake_case )
return PrecisionType(['no', 'fp16', 'bf16', 'fp8'][value] )
def __magic_name__ ( A ) -> Optional[int]:
snake_case = int(_snake_case )
return SageMakerDistributedType(['NO', 'DATA_PARALLEL', 'MODEL_PARALLEL'][value] )
def __magic_name__ ( A ) -> Tuple:
return {"yes": True, "no": False}[value.lower()]
class lowerCamelCase ( argparse.RawDescriptionHelpFormatter ):
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_ ) -> str:
snake_case = super()._format_usage(a_, a_, a_, a_ )
snake_case = usage.replace('<command> [<args>] ', '' )
return usage
| 352 |
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def __magic_name__ ( A = 2_0_0_0_0_0_0 ) -> int:
snake_case = [0]
snake_case = 42
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
snake_case = 0
# the area corresponding to the grid that gives the product closest to target
snake_case = 0
# an estimate of b, using the quadratic formula
snake_case = 42
# the largest integer less than b_estimate
snake_case = 42
# the largest integer less than b_estimate
snake_case = 42
# the triangle number corresponding to b_floor
snake_case = 42
# the triangle number corresponding to b_ceil
snake_case = 42
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
snake_case = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
snake_case = floor(A )
snake_case = ceil(A )
snake_case = triangle_numbers[b_floor]
snake_case = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
snake_case = triangle_b_first_guess * triangle_a
snake_case = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
snake_case = triangle_b_second_guess * triangle_a
snake_case = idx_a * b_ceil
return area
if __name__ == "__main__":
print(f"{solution() = }")
| 332 | 0 |
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class lowerCamelCase ( SCREAMING_SNAKE_CASE_ ):
def __get__( self, lowercase_, lowercase_=None ) -> Optional[int]:
if obj is None:
return self
if self.fget is None:
raise AttributeError('unreadable attribute' )
snake_case = '__cached_' + self.fget.__name__
snake_case = getattr(snake_case__, snake_case__, snake_case__ )
if cached is None:
snake_case = self.fget(snake_case__ )
setattr(snake_case__, snake_case__, snake_case__ )
return cached
def __magic_name__ ( UpperCamelCase_ ) -> str:
snake_case = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(F'''invalid truth value {val!r}''' )
def __magic_name__ ( UpperCamelCase_ ) -> Union[str, Any]:
if is_torch_fx_proxy(_A ):
return True
if is_torch_available():
import torch
if isinstance(_A , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(_A , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(_A , (jnp.ndarray, Tracer) ):
return True
return isinstance(_A , np.ndarray )
def __magic_name__ ( UpperCamelCase_ ) -> Any:
return isinstance(_A , np.ndarray )
def __magic_name__ ( UpperCamelCase_ ) -> Union[str, Any]:
return _is_numpy(_A )
def __magic_name__ ( UpperCamelCase_ ) -> Tuple:
import torch
return isinstance(_A , torch.Tensor )
def __magic_name__ ( UpperCamelCase_ ) -> Union[str, Any]:
return False if not is_torch_available() else _is_torch(_A )
def __magic_name__ ( UpperCamelCase_ ) -> List[str]:
import torch
return isinstance(_A , torch.device )
def __magic_name__ ( UpperCamelCase_ ) -> Optional[Any]:
return False if not is_torch_available() else _is_torch_device(_A )
def __magic_name__ ( UpperCamelCase_ ) -> Optional[int]:
import torch
if isinstance(_A , _A ):
if hasattr(_A , _A ):
snake_case = getattr(_A , _A )
else:
return False
return isinstance(_A , torch.dtype )
def __magic_name__ ( UpperCamelCase_ ) -> Dict:
return False if not is_torch_available() else _is_torch_dtype(_A )
def __magic_name__ ( UpperCamelCase_ ) -> str:
import tensorflow as tf
return isinstance(_A , tf.Tensor )
def __magic_name__ ( UpperCamelCase_ ) -> Tuple:
return False if not is_tf_available() else _is_tensorflow(_A )
def __magic_name__ ( UpperCamelCase_ ) -> str:
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(_A , 'is_symbolic_tensor' ):
return tf.is_symbolic_tensor(_A )
return type(_A ) == tf.Tensor
def __magic_name__ ( UpperCamelCase_ ) -> List[str]:
return False if not is_tf_available() else _is_tf_symbolic_tensor(_A )
def __magic_name__ ( UpperCamelCase_ ) -> Dict:
import jax.numpy as jnp # noqa: F811
return isinstance(_A , jnp.ndarray )
def __magic_name__ ( UpperCamelCase_ ) -> Optional[Any]:
return False if not is_flax_available() else _is_jax(_A )
def __magic_name__ ( UpperCamelCase_ ) -> int:
if isinstance(_A , (dict, UserDict) ):
return {k: to_py_obj(_A ) for k, v in obj.items()}
elif isinstance(_A , (list, tuple) ):
return [to_py_obj(_A ) for o in obj]
elif is_tf_tensor(_A ):
return obj.numpy().tolist()
elif is_torch_tensor(_A ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(_A ):
return np.asarray(_A ).tolist()
elif isinstance(_A , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def __magic_name__ ( UpperCamelCase_ ) -> Tuple:
if isinstance(_A , (dict, UserDict) ):
return {k: to_numpy(_A ) for k, v in obj.items()}
elif isinstance(_A , (list, tuple) ):
return np.array(_A )
elif is_tf_tensor(_A ):
return obj.numpy()
elif is_torch_tensor(_A ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(_A ):
return np.asarray(_A )
else:
return obj
class lowerCamelCase ( SCREAMING_SNAKE_CASE_ ):
def _lowerCamelCase ( self ) -> str:
snake_case = fields(self )
# Safety and consistency checks
if not len(snake_case__ ):
raise ValueError(F'''{self.__class__.__name__} has no fields.''' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' )
snake_case = getattr(self, class_fields[0].name )
snake_case = all(getattr(self, field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(snake_case__ ):
if isinstance(snake_case__, snake_case__ ):
snake_case = first_field.items()
snake_case = True
else:
try:
snake_case = iter(snake_case__ )
snake_case = True
except TypeError:
snake_case = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(snake_case__ ):
if (
not isinstance(snake_case__, (list, tuple) )
or not len(snake_case__ ) == 2
or not isinstance(element[0], snake_case__ )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
snake_case = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
break
setattr(self, element[0], element[1] )
if element[1] is not None:
snake_case = element[1]
elif first_field is not None:
snake_case = first_field
else:
for field in class_fields:
snake_case = getattr(self, field.name )
if v is not None:
snake_case = v
def __delitem__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )
def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> Union[str, Any]:
raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )
def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> Dict:
raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )
def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> Any:
raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )
def __getitem__( self, lowercase_ ) -> Optional[Any]:
if isinstance(snake_case__, snake_case__ ):
snake_case = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self, lowercase_, lowercase_ ) -> Optional[int]:
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(snake_case__, snake_case__ )
super().__setattr__(snake_case__, snake_case__ )
def __setitem__( self, lowercase_, lowercase_ ) -> int:
super().__setitem__(snake_case__, snake_case__ )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(snake_case__, snake_case__ )
def _lowerCamelCase ( self ) -> Tuple:
return tuple(self[k] for k in self.keys() )
class lowerCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
@classmethod
def _lowerCamelCase ( cls, lowercase_ ) -> Optional[Any]:
raise ValueError(
F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}''' )
class lowerCamelCase ( SCREAMING_SNAKE_CASE_ ):
snake_case_ = '''longest'''
snake_case_ = '''max_length'''
snake_case_ = '''do_not_pad'''
class lowerCamelCase ( SCREAMING_SNAKE_CASE_ ):
snake_case_ = '''pt'''
snake_case_ = '''tf'''
snake_case_ = '''np'''
snake_case_ = '''jax'''
class lowerCamelCase :
def __init__( self, lowercase_ ) -> Optional[Any]:
snake_case = context_managers
snake_case = ExitStack()
def __enter__( self ) -> List[Any]:
for context_manager in self.context_managers:
self.stack.enter_context(snake_case__ )
def __exit__( self, *lowercase_, **lowercase_ ) -> int:
self.stack.__exit__(*snake_case__, **snake_case__ )
def __magic_name__ ( UpperCamelCase_ ) -> str:
snake_case = infer_framework(_A )
if framework == "tf":
snake_case = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
snake_case = inspect.signature(model_class.forward ) # PyTorch models
else:
snake_case = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def __magic_name__ ( UpperCamelCase_ ) -> List[Any]:
snake_case = model_class.__name__
snake_case = infer_framework(_A )
if framework == "tf":
snake_case = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
snake_case = inspect.signature(model_class.forward ) # PyTorch models
else:
snake_case = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def __magic_name__ ( UpperCamelCase_ , UpperCamelCase_ = "" , UpperCamelCase_ = "." ) -> int:
def _flatten_dict(UpperCamelCase_ , UpperCamelCase_="" , UpperCamelCase_="." ):
for k, v in d.items():
snake_case = str(_A ) + delimiter + str(_A ) if parent_key else k
if v and isinstance(_A , _A ):
yield from flatten_dict(_A , _A , delimiter=_A ).items()
else:
yield key, v
return dict(_flatten_dict(_A , _A , _A ) )
@contextmanager
def __magic_name__ ( UpperCamelCase_ , UpperCamelCase_ = False ) -> List[Any]:
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def __magic_name__ ( UpperCamelCase_ , UpperCamelCase_=None ) -> Optional[Any]:
if is_numpy_array(_A ):
return np.transpose(_A , axes=_A )
elif is_torch_tensor(_A ):
return array.T if axes is None else array.permute(*_A )
elif is_tf_tensor(_A ):
import tensorflow as tf
return tf.transpose(_A , perm=_A )
elif is_jax_tensor(_A ):
return jnp.transpose(_A , axes=_A )
else:
raise ValueError(F'''Type not supported for transpose: {type(_A )}.''' )
def __magic_name__ ( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]:
if is_numpy_array(_A ):
return np.reshape(_A , _A )
elif is_torch_tensor(_A ):
return array.reshape(*_A )
elif is_tf_tensor(_A ):
import tensorflow as tf
return tf.reshape(_A , _A )
elif is_jax_tensor(_A ):
return jnp.reshape(_A , _A )
else:
raise ValueError(F'''Type not supported for reshape: {type(_A )}.''' )
def __magic_name__ ( UpperCamelCase_ , UpperCamelCase_=None ) -> Tuple:
if is_numpy_array(_A ):
return np.squeeze(_A , axis=_A )
elif is_torch_tensor(_A ):
return array.squeeze() if axis is None else array.squeeze(dim=_A )
elif is_tf_tensor(_A ):
import tensorflow as tf
return tf.squeeze(_A , axis=_A )
elif is_jax_tensor(_A ):
return jnp.squeeze(_A , axis=_A )
else:
raise ValueError(F'''Type not supported for squeeze: {type(_A )}.''' )
def __magic_name__ ( UpperCamelCase_ , UpperCamelCase_ ) -> Tuple:
if is_numpy_array(_A ):
return np.expand_dims(_A , _A )
elif is_torch_tensor(_A ):
return array.unsqueeze(dim=_A )
elif is_tf_tensor(_A ):
import tensorflow as tf
return tf.expand_dims(_A , axis=_A )
elif is_jax_tensor(_A ):
return jnp.expand_dims(_A , axis=_A )
else:
raise ValueError(F'''Type not supported for expand_dims: {type(_A )}.''' )
def __magic_name__ ( UpperCamelCase_ ) -> str:
if is_numpy_array(_A ):
return np.size(_A )
elif is_torch_tensor(_A ):
return array.numel()
elif is_tf_tensor(_A ):
import tensorflow as tf
return tf.size(_A )
elif is_jax_tensor(_A ):
return array.size
else:
raise ValueError(F'''Type not supported for expand_dims: {type(_A )}.''' )
def __magic_name__ ( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]:
for key, value in auto_map.items():
if isinstance(_A , (tuple, list) ):
snake_case = [F'''{repo_id}--{v}''' if (v is not None and '--' not in v) else v for v in value]
elif value is not None and "--" not in value:
snake_case = F'''{repo_id}--{value}'''
return auto_map
def __magic_name__ ( UpperCamelCase_ ) -> List[str]:
for base_class in inspect.getmro(_A ):
snake_case = base_class.__module__
snake_case = base_class.__name__
if module.startswith('tensorflow' ) or module.startswith('keras' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('torch' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('flax' ) or module.startswith('jax' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(F'''Could not infer framework from class {model_class}.''' )
| 353 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 332 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''poolformer'''
def __init__( self, lowercase_=3, lowercase_=16, lowercase_=16, lowercase_=3, lowercase_=4.0, lowercase_=[2, 2, 6, 2], lowercase_=[64, 128, 320, 512], lowercase_=[7, 3, 3, 3], lowercase_=[4, 2, 2, 2], lowercase_=[2, 1, 1, 1], lowercase_=4, lowercase_=0.0, lowercase_="gelu", lowercase_=True, lowercase_=1E-5, lowercase_=0.02, **lowercase_, ) -> Dict:
snake_case = num_channels
snake_case = patch_size
snake_case = stride
snake_case = padding
snake_case = pool_size
snake_case = hidden_sizes
snake_case = mlp_ratio
snake_case = depths
snake_case = patch_sizes
snake_case = strides
snake_case = num_encoder_blocks
snake_case = drop_path_rate
snake_case = hidden_act
snake_case = use_layer_scale
snake_case = layer_scale_init_value
snake_case = initializer_range
super().__init__(**__UpperCAmelCase )
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = version.parse('''1.11''' )
@property
def _lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _lowerCamelCase ( self ) -> float:
return 2E-3
| 354 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
lowerCAmelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCamelCase :
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(__lowerCAmelCase )} )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} )
snake_case_ = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
snake_case_ = field(
default=128 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , )
snake_case_ = field(
default=64 , metadata={
'''help''': (
'''The maximum number of tokens for the question. Questions longer than this will '''
'''be truncated to this length.'''
)
} , )
snake_case_ = field(
default=30 , metadata={
'''help''': (
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
)
} , )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} )
snake_case_ = field(
default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
snake_case_ = field(
default=20 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
snake_case_ = field(
default=0 , metadata={
'''help''': (
'''language id of input for language-specific xlm models (see'''
''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'''
)
} , )
snake_case_ = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} )
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''train'''
snake_case_ = '''dev'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
def __init__( self, lowercase_, lowercase_, lowercase_ = None, lowercase_ = Split.train, lowercase_ = False, lowercase_ = None, lowercase_ = "pt", ) -> int:
snake_case = args
snake_case = is_language_sensitive
snake_case = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(lowercase_, lowercase_ ):
try:
snake_case = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
snake_case = mode
# Load data features from cache or dataset file
snake_case = 'v2' if args.version_2_with_negative else 'v1'
snake_case = os.path.join(
cache_dir if cache_dir is not None else args.data_dir, F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
snake_case = cached_features_file + '.lock'
with FileLock(lowercase_ ):
if os.path.exists(lowercase_ ) and not args.overwrite_cache:
snake_case = time.time()
snake_case = torch.load(lowercase_ )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
snake_case = self.old_features['features']
snake_case = self.old_features.get('dataset', lowercase_ )
snake_case = self.old_features.get('examples', lowercase_ )
logger.info(
F'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
' future run' )
else:
if mode == Split.dev:
snake_case = self.processor.get_dev_examples(args.data_dir )
else:
snake_case = self.processor.get_train_examples(args.data_dir )
snake_case , snake_case = squad_convert_examples_to_features(
examples=self.examples, tokenizer=lowercase_, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowercase_, )
snake_case = time.time()
torch.save(
{'features': self.features, 'dataset': self.dataset, 'examples': self.examples}, lowercase_, )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ) -> Tuple:
return len(self.features )
def __getitem__( self, lowercase_ ) -> Dict[str, torch.Tensor]:
# Convert to Tensors and build dataset
snake_case = self.features[i]
snake_case = torch.tensor(feature.input_ids, dtype=torch.long )
snake_case = torch.tensor(feature.attention_mask, dtype=torch.long )
snake_case = torch.tensor(feature.token_type_ids, dtype=torch.long )
snake_case = torch.tensor(feature.cls_index, dtype=torch.long )
snake_case = torch.tensor(feature.p_mask, dtype=torch.float )
snake_case = torch.tensor(feature.is_impossible, dtype=torch.float )
snake_case = {
'input_ids': input_ids,
'attention_mask': attention_mask,
'token_type_ids': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'cls_index': cls_index, 'p_mask': p_mask} )
if self.args.version_2_with_negative:
inputs.update({'is_impossible': is_impossible} )
if self.is_language_sensitive:
inputs.update({'langs': (torch.ones(input_ids.shape, dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
snake_case = torch.tensor(feature.start_position, dtype=torch.long )
snake_case = torch.tensor(feature.end_position, dtype=torch.long )
inputs.update({'start_positions': start_positions, 'end_positions': end_positions} )
return inputs
| 332 | 0 |
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
lowerCAmelCase_ = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class lowerCamelCase ( _UpperCAmelCase ):
snake_case_ = 'facebook/nllb-200-distilled-600M'
snake_case_ = (
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
snake_case_ = 'translator'
snake_case_ = AutoTokenizer
snake_case_ = AutoModelForSeqaSeqLM
snake_case_ = LANGUAGE_CODES
snake_case_ = ['text', 'text', 'text']
snake_case_ = ['text']
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_ ) -> str:
if src_lang not in self.lang_to_code:
raise ValueError(F'''{src_lang} is not a supported language.''' )
if tgt_lang not in self.lang_to_code:
raise ValueError(F'''{tgt_lang} is not a supported language.''' )
snake_case = self.lang_to_code[src_lang]
snake_case = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
SCREAMING_SNAKE_CASE_, return_tensors='pt', src_lang=SCREAMING_SNAKE_CASE_, tgt_lang=SCREAMING_SNAKE_CASE_ )
def _lowerCamelCase ( self, lowercase_ ) -> Dict:
return self.model.generate(**SCREAMING_SNAKE_CASE_ )
def _lowerCamelCase ( self, lowercase_ ) -> Any:
return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=SCREAMING_SNAKE_CASE_ )
| 355 |
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def __magic_name__ ( A , A , A ) -> Any:
# Initialise PyTorch model
snake_case = BertConfig.from_json_file(A )
print(F'''Building PyTorch model from configuration: {config}''' )
snake_case = BertForPreTraining(A )
# Load weights from tf checkpoint
load_tf_weights_in_bert(A , A , A )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , A )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCAmelCase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 332 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
lowerCAmelCase_ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 356 |
'''simple docstring'''
from __future__ import annotations
def __magic_name__ ( A ) -> list:
if len(A ) == 0:
return []
snake_case , snake_case = min(A ), max(A )
snake_case = int(max_value - min_value ) + 1
snake_case = [[] for _ in range(A )]
for i in my_list:
buckets[int(i - min_value )].append(A )
return [v for bucket in buckets for v in sorted(A )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
| 332 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
lowerCAmelCase_ = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 357 |
'''simple docstring'''
def __magic_name__ ( A ) -> float:
return 1_0 - x * x
def __magic_name__ ( A , A ) -> float:
# Bolzano theory in order to find if there is a root between a and b
if equation(A ) * equation(A ) >= 0:
raise ValueError('Wrong space!' )
snake_case = a
while (b - a) >= 0.01:
# Find middle point
snake_case = (a + b) / 2
# Check if middle point is root
if equation(A ) == 0.0:
break
# Decide the side to repeat the steps
if equation(A ) * equation(A ) < 0:
snake_case = c
else:
snake_case = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 332 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class lowerCamelCase ( lowerCAmelCase_ ):
snake_case_ = 'blip_2_vision_model'
def __init__( self, lowercase_=1408, lowercase_=6144, lowercase_=39, lowercase_=16, lowercase_=224, lowercase_=14, lowercase_="gelu", lowercase_=0.00_001, lowercase_=0.0, lowercase_=1E-10, lowercase_=True, **lowercase_, ) -> Dict:
super().__init__(**__lowerCAmelCase )
snake_case = hidden_size
snake_case = intermediate_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = patch_size
snake_case = image_size
snake_case = initializer_range
snake_case = attention_dropout
snake_case = layer_norm_eps
snake_case = hidden_act
snake_case = qkv_bias
@classmethod
def _lowerCamelCase ( cls, lowercase_, **lowercase_ ) -> Dict:
cls._set_token_in_kwargs(__lowerCAmelCase )
snake_case , snake_case = cls.get_config_dict(__lowerCAmelCase, **__lowerCAmelCase )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
snake_case = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls, 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__lowerCAmelCase, **__lowerCAmelCase )
class lowerCamelCase ( lowerCAmelCase_ ):
snake_case_ = 'blip_2_qformer'
def __init__( self, lowercase_=30522, lowercase_=768, lowercase_=12, lowercase_=12, lowercase_=3072, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=0.02, lowercase_=1E-12, lowercase_=0, lowercase_="absolute", lowercase_=2, lowercase_=1408, **lowercase_, ) -> str:
super().__init__(pad_token_id=__lowerCAmelCase, **__lowerCAmelCase )
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = hidden_act
snake_case = intermediate_size
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = initializer_range
snake_case = layer_norm_eps
snake_case = position_embedding_type
snake_case = cross_attention_frequency
snake_case = encoder_hidden_size
@classmethod
def _lowerCamelCase ( cls, lowercase_, **lowercase_ ) -> List[Any]:
cls._set_token_in_kwargs(__lowerCAmelCase )
snake_case , snake_case = cls.get_config_dict(__lowerCAmelCase, **__lowerCAmelCase )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
snake_case = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls, 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__lowerCAmelCase, **__lowerCAmelCase )
class lowerCamelCase ( lowerCAmelCase_ ):
snake_case_ = 'blip-2'
snake_case_ = True
def __init__( self, lowercase_=None, lowercase_=None, lowercase_=None, lowercase_=32, **lowercase_ ) -> Any:
super().__init__(**__lowerCAmelCase )
if vision_config is None:
snake_case = {}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
snake_case = {}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
snake_case = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
snake_case = BlipaVisionConfig(**__lowerCAmelCase )
snake_case = BlipaQFormerConfig(**__lowerCAmelCase )
snake_case = text_config['model_type'] if 'model_type' in text_config else 'opt'
snake_case = CONFIG_MAPPING[text_model_type](**__lowerCAmelCase )
snake_case = self.text_config.tie_word_embeddings
snake_case = self.text_config.is_encoder_decoder
snake_case = num_query_tokens
snake_case = self.vision_config.hidden_size
snake_case = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
snake_case = 1.0
snake_case = 0.02
@classmethod
def _lowerCamelCase ( cls, lowercase_, lowercase_, lowercase_, **lowercase_, ) -> Union[str, Any]:
return cls(
vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **__lowerCAmelCase, )
def _lowerCamelCase ( self ) -> Tuple:
snake_case = copy.deepcopy(self.__dict__ )
snake_case = self.vision_config.to_dict()
snake_case = self.qformer_config.to_dict()
snake_case = self.text_config.to_dict()
snake_case = self.__class__.model_type
return output
| 358 |
'''simple docstring'''
import pytest
lowerCAmelCase_ = "__dummy_dataset1__"
lowerCAmelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def __magic_name__ ( ) -> List[Any]:
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def __magic_name__ ( ) -> Union[str, Any]:
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def __magic_name__ ( A , A , A ) -> Optional[int]:
snake_case = dataset_loading_script_name
snake_case = tmp_path / 'datasets' / script_name
script_dir.mkdir(parents=A )
snake_case = script_dir / F'''{script_name}.py'''
with open(A , 'w' ) as f:
f.write(A )
return str(A )
| 332 | 0 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger()
@dataclass
class lowerCamelCase :
snake_case_ = 42
snake_case_ = field(default_factory=SCREAMING_SNAKE_CASE__ )
snake_case_ = field(default_factory=SCREAMING_SNAKE_CASE__ )
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_ ) -> Union[str, Any]:
snake_case = len(list(m.modules() ) ) == 1 or isinstance(a_, nn.Convad ) or isinstance(a_, nn.BatchNormad )
if has_not_submodules:
self.traced.append(a_ )
def __call__( self, lowercase_ ) -> Optional[Any]:
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(a_ )
[x.remove() for x in self.handles]
return self
@property
def _lowerCamelCase ( self ) -> Optional[Any]:
return list(filter(lambda lowercase_ : len(list(x.state_dict().keys() ) ) > 0, self.traced ) )
@dataclass
class lowerCamelCase :
snake_case_ = 42
snake_case_ = 42
snake_case_ = 0
snake_case_ = field(default_factory=SCREAMING_SNAKE_CASE__ )
snake_case_ = field(default_factory=SCREAMING_SNAKE_CASE__ )
def __call__( self, lowercase_ ) -> Optional[Any]:
snake_case = Tracker(self.dest )(a_ ).parametrized
snake_case = Tracker(self.src )(a_ ).parametrized
snake_case = list(filter(lambda lowercase_ : type(a_ ) not in self.src_skip, a_ ) )
snake_case = list(filter(lambda lowercase_ : type(a_ ) not in self.dest_skip, a_ ) )
if len(a_ ) != len(a_ ):
raise Exception(
F'''Numbers of operations are different. Source module has {len(a_ )} operations while'''
F''' destination module has {len(a_ )}.''' )
for dest_m, src_m in zip(a_, a_ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'''Transfered from={src_m} to={dest_m}''' )
def __magic_name__ ( A , A , A , A = True ) -> Dict:
print(F'''Converting {name}...''' )
with torch.no_grad():
snake_case = timm.create_model(_UpperCamelCase , pretrained=_UpperCamelCase ).eval()
snake_case = ResNetForImageClassification(_UpperCamelCase ).eval()
snake_case = ModuleTransfer(src=_UpperCamelCase , dest=_UpperCamelCase )
snake_case = torch.randn((1, 3, 2_2_4, 2_2_4) )
module_transfer(_UpperCamelCase )
assert torch.allclose(from_model(_UpperCamelCase ) , our_model(_UpperCamelCase ).logits ), "The model logits don't match the original one."
snake_case = F'''resnet{"-".join(name.split("resnet" ) )}'''
print(_UpperCamelCase )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=_UpperCamelCase , )
# we can use the convnext one
snake_case = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=_UpperCamelCase , )
print(F'''Pushed {checkpoint_name}''' )
def __magic_name__ ( A , A = None , A = True ) -> int:
snake_case = 'imagenet-1k-id2label.json'
snake_case = 1_0_0_0
snake_case = (1, num_labels)
snake_case = 'huggingface/label-files'
snake_case = num_labels
snake_case = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type='dataset' ) , 'r' ) )
snake_case = {int(_UpperCamelCase ): v for k, v in idalabel.items()}
snake_case = idalabel
snake_case = {v: k for k, v in idalabel.items()}
snake_case = partial(_UpperCamelCase , num_labels=_UpperCamelCase , idalabel=_UpperCamelCase , labelaid=_UpperCamelCase )
snake_case = {
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 2_3, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 3_6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
}
if model_name:
convert_weight_and_push(_UpperCamelCase , names_to_config[model_name] , _UpperCamelCase , _UpperCamelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return config, expected_shape
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
lowerCAmelCase_ = parser.parse_args()
lowerCAmelCase_ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 359 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
lowerCAmelCase_ = Lock()
def __magic_name__ ( A , A , A , A , A , A , A ) -> Any:
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 1_0 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(A )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
snake_case = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
snake_case = min(A , A )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(A )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
snake_case = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
snake_case = max(A , A )
# after all swaps are performed, send the values back to main
result_pipe[1].send(A )
def __magic_name__ ( A ) -> str:
snake_case = []
snake_case = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
snake_case = Pipe()
snake_case = Pipe()
process_array_.append(
Process(
target=A , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
snake_case = temp_rs
snake_case = temp_rr
for i in range(1 , len(A ) - 1 ):
snake_case = Pipe()
snake_case = Pipe()
process_array_.append(
Process(
target=A , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
snake_case = temp_rs
snake_case = temp_rr
process_array_.append(
Process(
target=A , args=(
len(A ) - 1,
arr[len(A ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(A ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(A ) ):
snake_case = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def __magic_name__ ( ) -> Tuple:
snake_case = list(range(1_0 , 0 , -1 ) )
print('Initial List' )
print(*A )
snake_case = odd_even_transposition(A )
print('Sorted List\n' )
print(*A )
if __name__ == "__main__":
main()
| 332 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> str:
snake_case = tempfile.mkdtemp()
snake_case = SamImageProcessor()
snake_case = SamProcessor(__lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self, **lowercase_ ) -> Optional[Any]:
return AutoProcessor.from_pretrained(self.tmpdirname, **__lowerCAmelCase ).image_processor
def _lowerCamelCase ( self ) -> Any:
shutil.rmtree(self.tmpdirname )
def _lowerCamelCase ( self ) -> int:
snake_case = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )]
snake_case = [Image.fromarray(np.moveaxis(__lowerCAmelCase, 0, -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCamelCase ( self ) -> Any:
snake_case = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case = self.get_image_processor(do_normalize=__lowerCAmelCase, padding_value=1.0 )
snake_case = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=__lowerCAmelCase, padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, __lowerCAmelCase )
def _lowerCamelCase ( self ) -> List[Any]:
snake_case = self.get_image_processor()
snake_case = SamProcessor(image_processor=__lowerCAmelCase )
snake_case = self.prepare_image_inputs()
snake_case = image_processor(__lowerCAmelCase, return_tensors='np' )
snake_case = processor(images=__lowerCAmelCase, return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2 )
@require_torch
def _lowerCamelCase ( self ) -> Tuple:
snake_case = self.get_image_processor()
snake_case = SamProcessor(image_processor=__lowerCAmelCase )
snake_case = [torch.ones((1, 3, 5, 5) )]
snake_case = [[1764, 2646]]
snake_case = [[683, 1024]]
snake_case = processor.post_process_masks(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase )
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646) )
snake_case = processor.post_process_masks(
__lowerCAmelCase, torch.tensor(__lowerCAmelCase ), torch.tensor(__lowerCAmelCase ) )
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646) )
# should also work with np
snake_case = [np.ones((1, 3, 5, 5) )]
snake_case = processor.post_process_masks(__lowerCAmelCase, np.array(__lowerCAmelCase ), np.array(__lowerCAmelCase ) )
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646) )
snake_case = [[1, 0], [0, 1]]
with self.assertRaises(__lowerCAmelCase ):
snake_case = processor.post_process_masks(__lowerCAmelCase, np.array(__lowerCAmelCase ), np.array(__lowerCAmelCase ) )
@require_vision
@require_tf
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> Dict:
snake_case = tempfile.mkdtemp()
snake_case = SamImageProcessor()
snake_case = SamProcessor(__lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self, **lowercase_ ) -> int:
return AutoProcessor.from_pretrained(self.tmpdirname, **__lowerCAmelCase ).image_processor
def _lowerCamelCase ( self ) -> Dict:
shutil.rmtree(self.tmpdirname )
def _lowerCamelCase ( self ) -> str:
snake_case = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )]
snake_case = [Image.fromarray(np.moveaxis(__lowerCAmelCase, 0, -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCamelCase ( self ) -> str:
snake_case = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case = self.get_image_processor(do_normalize=__lowerCAmelCase, padding_value=1.0 )
snake_case = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=__lowerCAmelCase, padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, __lowerCAmelCase )
def _lowerCamelCase ( self ) -> Any:
snake_case = self.get_image_processor()
snake_case = SamProcessor(image_processor=__lowerCAmelCase )
snake_case = self.prepare_image_inputs()
snake_case = image_processor(__lowerCAmelCase, return_tensors='np' )
snake_case = processor(images=__lowerCAmelCase, return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2 )
@require_tf
def _lowerCamelCase ( self ) -> List[str]:
snake_case = self.get_image_processor()
snake_case = SamProcessor(image_processor=__lowerCAmelCase )
snake_case = [tf.ones((1, 3, 5, 5) )]
snake_case = [[1764, 2646]]
snake_case = [[683, 1024]]
snake_case = processor.post_process_masks(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, return_tensors='tf' )
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646) )
snake_case = processor.post_process_masks(
__lowerCAmelCase, tf.convert_to_tensor(__lowerCAmelCase ), tf.convert_to_tensor(__lowerCAmelCase ), return_tensors='tf', )
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646) )
# should also work with np
snake_case = [np.ones((1, 3, 5, 5) )]
snake_case = processor.post_process_masks(
__lowerCAmelCase, np.array(__lowerCAmelCase ), np.array(__lowerCAmelCase ), return_tensors='tf' )
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646) )
snake_case = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
snake_case = processor.post_process_masks(
__lowerCAmelCase, np.array(__lowerCAmelCase ), np.array(__lowerCAmelCase ), return_tensors='tf' )
@require_vision
@require_torchvision
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> Optional[int]:
snake_case = tempfile.mkdtemp()
snake_case = SamImageProcessor()
snake_case = SamProcessor(__lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self, **lowercase_ ) -> Tuple:
return AutoProcessor.from_pretrained(self.tmpdirname, **__lowerCAmelCase ).image_processor
def _lowerCamelCase ( self ) -> Dict:
shutil.rmtree(self.tmpdirname )
def _lowerCamelCase ( self ) -> Optional[int]:
snake_case = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )]
snake_case = [Image.fromarray(np.moveaxis(__lowerCAmelCase, 0, -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def _lowerCamelCase ( self ) -> Tuple:
snake_case = self.get_image_processor()
snake_case = SamProcessor(image_processor=__lowerCAmelCase )
snake_case = np.random.randint(0, 2, size=(1, 3, 5, 5) ).astype(np.floataa )
snake_case = [tf.convert_to_tensor(__lowerCAmelCase )]
snake_case = [torch.tensor(__lowerCAmelCase )]
snake_case = [[1764, 2646]]
snake_case = [[683, 1024]]
snake_case = processor.post_process_masks(
__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, return_tensors='tf' )
snake_case = processor.post_process_masks(
__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, return_tensors='pt' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def _lowerCamelCase ( self ) -> Union[str, Any]:
snake_case = self.get_image_processor()
snake_case = SamProcessor(image_processor=__lowerCAmelCase )
snake_case = self.prepare_image_inputs()
snake_case = image_processor(__lowerCAmelCase, return_tensors='pt' )['pixel_values'].numpy()
snake_case = processor(images=__lowerCAmelCase, return_tensors='pt' )['pixel_values'].numpy()
snake_case = image_processor(__lowerCAmelCase, return_tensors='tf' )['pixel_values'].numpy()
snake_case = processor(images=__lowerCAmelCase, return_tensors='tf' )['pixel_values'].numpy()
self.assertTrue(np.allclose(__lowerCAmelCase, __lowerCAmelCase ) )
self.assertTrue(np.allclose(__lowerCAmelCase, __lowerCAmelCase ) )
self.assertTrue(np.allclose(__lowerCAmelCase, __lowerCAmelCase ) )
| 360 |
'''simple docstring'''
from __future__ import annotations
def __magic_name__ ( A ) -> None:
create_state_space_tree(A , [] , 0 , [0 for i in range(len(A ) )] )
def __magic_name__ ( A , A , A , A , ) -> None:
if index == len(A ):
print(A )
return
for i in range(len(A ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
snake_case = True
create_state_space_tree(A , A , index + 1 , A )
current_sequence.pop()
snake_case = False
lowerCAmelCase_ = [3, 1, 2, 4]
generate_all_permutations(sequence)
lowerCAmelCase_ = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 332 | 0 |
'''simple docstring'''
def __magic_name__ ( A = 1_0_0_0_0_0_0 ) -> int:
snake_case = set(range(3 , a__ , 2 ) )
primes.add(2 )
for p in range(3 , a__ , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , a__ , a__ ) ) )
snake_case = [float(a__ ) for n in range(limit + 1 )]
for p in primes:
for n in range(a__ , limit + 1 , a__ ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f"{solution() = }")
| 361 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''roberta'''
def __init__( self, lowercase_=50265, lowercase_=768, lowercase_=12, lowercase_=12, lowercase_=3072, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=2, lowercase_=0.02, lowercase_=1E-12, lowercase_=1, lowercase_=0, lowercase_=2, lowercase_="absolute", lowercase_=True, lowercase_=None, **lowercase_, ) -> Tuple:
super().__init__(pad_token_id=lowercase_, bos_token_id=lowercase_, eos_token_id=lowercase_, **lowercase_ )
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = hidden_act
snake_case = intermediate_size
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = type_vocab_size
snake_case = initializer_range
snake_case = layer_norm_eps
snake_case = position_embedding_type
snake_case = use_cache
snake_case = classifier_dropout
class lowerCamelCase ( __lowerCAmelCase ):
@property
def _lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
snake_case = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 332 | 0 |
'''simple docstring'''
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def __magic_name__ ( A = "" ) -> dict[str, float]:
snake_case = url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250'
snake_case = BeautifulSoup(requests.get(lowercase_ ).text , 'html.parser' )
snake_case = soup.find_all('td' , attrs='titleColumn' )
snake_case = soup.find_all('td' , class_='ratingColumn imdbRating' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(lowercase_ , lowercase_ )
}
def __magic_name__ ( A = "IMDb_Top_250_Movies.csv" ) -> None:
snake_case = get_imdb_top_aaa_movies()
with open(lowercase_ , 'w' , newline='' ) as out_file:
snake_case = csv.writer(lowercase_ )
writer.writerow(['Movie title', 'IMDb rating'] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 362 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowerCAmelCase_ = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
lowerCAmelCase_ = {
"allenai/led-base-16384": 1_6_3_8_4,
}
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = LEDTokenizer
snake_case_ = ['''input_ids''', '''attention_mask''']
def __init__( self, lowercase_=None, lowercase_=None, lowercase_=None, lowercase_="replace", lowercase_="<s>", lowercase_="</s>", lowercase_="</s>", lowercase_="<s>", lowercase_="<unk>", lowercase_="<pad>", lowercase_="<mask>", lowercase_=False, lowercase_=True, **lowercase_, ) -> int:
super().__init__(
lowercase_, lowercase_, tokenizer_file=lowercase_, errors=lowercase_, bos_token=lowercase_, eos_token=lowercase_, sep_token=lowercase_, cls_token=lowercase_, unk_token=lowercase_, pad_token=lowercase_, mask_token=lowercase_, add_prefix_space=lowercase_, trim_offsets=lowercase_, **lowercase_, )
snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space', lowercase_ ) != add_prefix_space:
snake_case = getattr(lowercase_, pre_tok_state.pop('type' ) )
snake_case = add_prefix_space
snake_case = pre_tok_class(**lowercase_ )
snake_case = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
snake_case = 'post_processor'
snake_case = getattr(self.backend_tokenizer, lowercase_, lowercase_ )
if tokenizer_component_instance:
snake_case = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
snake_case = tuple(state['sep'] )
if "cls" in state:
snake_case = tuple(state['cls'] )
snake_case = False
if state.get('add_prefix_space', lowercase_ ) != add_prefix_space:
snake_case = add_prefix_space
snake_case = True
if state.get('trim_offsets', lowercase_ ) != trim_offsets:
snake_case = trim_offsets
snake_case = True
if changes_to_apply:
snake_case = getattr(lowercase_, state.pop('type' ) )
snake_case = component_class(**lowercase_ )
setattr(self.backend_tokenizer, lowercase_, lowercase_ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def _lowerCamelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def _lowerCamelCase ( self, lowercase_ ) -> Any:
snake_case = AddedToken(lowercase_, lstrip=lowercase_, rstrip=lowercase_ ) if isinstance(lowercase_, lowercase_ ) else value
snake_case = value
def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding:
snake_case = kwargs.get('is_split_into_words', lowercase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*lowercase_, **lowercase_ )
def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding:
snake_case = kwargs.get('is_split_into_words', lowercase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._encode_plus(*lowercase_, **lowercase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> Tuple[str]:
snake_case = self._tokenizer.model.save(lowercase_, name=lowercase_ )
return tuple(lowercase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_=None ) -> Dict:
snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> List[int]:
snake_case = [self.sep_token_id]
snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCamelCase ( self, lowercase_, lowercase_ = None, lowercase_ = PaddingStrategy.DO_NOT_PAD, lowercase_ = None, lowercase_ = None, ) -> dict:
snake_case = super()._pad(
encoded_inputs=lowercase_, max_length=lowercase_, padding_strategy=lowercase_, pad_to_multiple_of=lowercase_, return_attention_mask=lowercase_, )
# Load from model defaults
if return_attention_mask is None:
snake_case = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
snake_case = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
snake_case = len(encoded_inputs['global_attention_mask'] ) != len(lowercase_ )
if needs_to_be_padded:
snake_case = len(lowercase_ ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
snake_case = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
snake_case = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
| 332 | 0 |
'''simple docstring'''
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
lowerCAmelCase_ = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def __magic_name__ ( A , A , A , A , A , A , A , A=False , ) -> List[str]:
output_path.parent.mkdir(parents=UpperCamelCase__ , exist_ok=UpperCamelCase__ )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
UpperCamelCase__ , UpperCamelCase__ , f=output_path.as_posix() , input_names=UpperCamelCase__ , output_names=UpperCamelCase__ , dynamic_axes=UpperCamelCase__ , do_constant_folding=UpperCamelCase__ , use_external_data_format=UpperCamelCase__ , enable_onnx_checker=UpperCamelCase__ , opset_version=UpperCamelCase__ , )
else:
export(
UpperCamelCase__ , UpperCamelCase__ , f=output_path.as_posix() , input_names=UpperCamelCase__ , output_names=UpperCamelCase__ , dynamic_axes=UpperCamelCase__ , do_constant_folding=UpperCamelCase__ , opset_version=UpperCamelCase__ , )
@torch.no_grad()
def __magic_name__ ( A , A , A , A = False ) -> int:
snake_case = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
snake_case = 'cuda'
elif fpaa and not torch.cuda.is_available():
raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
else:
snake_case = 'cpu'
snake_case = StableDiffusionPipeline.from_pretrained(UpperCamelCase__ , torch_dtype=UpperCamelCase__ ).to(UpperCamelCase__ )
snake_case = Path(UpperCamelCase__ )
# TEXT ENCODER
snake_case = pipeline.text_encoder.config.max_position_embeddings
snake_case = pipeline.text_encoder.config.hidden_size
snake_case = pipeline.tokenizer(
'A sample prompt' , padding='max_length' , max_length=pipeline.tokenizer.model_max_length , truncation=UpperCamelCase__ , return_tensors='pt' , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=UpperCamelCase__ , dtype=torch.intaa )) , output_path=output_path / 'text_encoder' / 'model.onnx' , ordered_input_names=['input_ids'] , output_names=['last_hidden_state', 'pooler_output'] , dynamic_axes={
'input_ids': {0: 'batch', 1: 'sequence'},
} , opset=UpperCamelCase__ , )
del pipeline.text_encoder
# UNET
snake_case = pipeline.unet.config.in_channels
snake_case = pipeline.unet.config.sample_size
snake_case = output_path / 'unet' / 'model.onnx'
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
torch.randn(2 ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
torch.randn(2 , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
False,
) , output_path=UpperCamelCase__ , ordered_input_names=['sample', 'timestep', 'encoder_hidden_states', 'return_dict'] , output_names=['out_sample'] , dynamic_axes={
'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
'timestep': {0: 'batch'},
'encoder_hidden_states': {0: 'batch', 1: 'sequence'},
} , opset=UpperCamelCase__ , use_external_data_format=UpperCamelCase__ , )
snake_case = str(unet_path.absolute().as_posix() )
snake_case = os.path.dirname(UpperCamelCase__ )
snake_case = onnx.load(UpperCamelCase__ )
# clean up existing tensor files
shutil.rmtree(UpperCamelCase__ )
os.mkdir(UpperCamelCase__ )
# collate external tensor files into one
onnx.save_model(
UpperCamelCase__ , UpperCamelCase__ , save_as_external_data=UpperCamelCase__ , all_tensors_to_one_file=UpperCamelCase__ , location='weights.pb' , convert_attribute=UpperCamelCase__ , )
del pipeline.unet
# VAE ENCODER
snake_case = pipeline.vae
snake_case = vae_encoder.config.in_channels
snake_case = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
snake_case = lambda A , A : vae_encoder.encode(UpperCamelCase__ , UpperCamelCase__ )[0].sample()
onnx_export(
UpperCamelCase__ , model_args=(
torch.randn(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
False,
) , output_path=output_path / 'vae_encoder' / 'model.onnx' , ordered_input_names=['sample', 'return_dict'] , output_names=['latent_sample'] , dynamic_axes={
'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=UpperCamelCase__ , )
# VAE DECODER
snake_case = pipeline.vae
snake_case = vae_decoder.config.latent_channels
snake_case = vae_decoder.config.out_channels
# forward only through the decoder part
snake_case = vae_encoder.decode
onnx_export(
UpperCamelCase__ , model_args=(
torch.randn(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
False,
) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=UpperCamelCase__ , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
snake_case = pipeline.safety_checker
snake_case = safety_checker.config.vision_config.num_channels
snake_case = safety_checker.config.vision_config.image_size
snake_case = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
torch.randn(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
) , output_path=output_path / 'safety_checker' / 'model.onnx' , ordered_input_names=['clip_input', 'images'] , output_names=['out_images', 'has_nsfw_concepts'] , dynamic_axes={
'clip_input': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
'images': {0: 'batch', 1: 'height', 2: 'width', 3: 'channels'},
} , opset=UpperCamelCase__ , )
del pipeline.safety_checker
snake_case = OnnxRuntimeModel.from_pretrained(output_path / 'safety_checker' )
snake_case = pipeline.feature_extractor
else:
snake_case = None
snake_case = None
snake_case = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_encoder' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_decoder' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'text_encoder' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / 'unet' ) , scheduler=pipeline.scheduler , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(UpperCamelCase__ )
print('ONNX pipeline saved to' , UpperCamelCase__ )
del pipeline
del onnx_pipeline
snake_case = OnnxStableDiffusionPipeline.from_pretrained(UpperCamelCase__ , provider='CPUExecutionProvider' )
print('ONNX pipeline is loadable' )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=1_4,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
lowerCAmelCase_ = parser.parse_args()
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
| 363 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __magic_name__ ( A ) -> Tuple:
snake_case = []
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
F'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
F'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
F'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
F'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def __magic_name__ ( A , A ) -> Optional[int]:
snake_case = []
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def __magic_name__ ( A ) -> List[Any]:
snake_case = []
token.append((F'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') )
return token
def __magic_name__ ( ) -> Dict:
snake_case = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def __magic_name__ ( A , A , A , A ) -> int:
snake_case = 'imagenet-1k-id2label.json'
snake_case = 1_0_0_0
snake_case = 'huggingface/label-files'
snake_case = num_labels
snake_case = json.load(open(cached_download(hf_hub_url(A , A , repo_type='dataset' ) ) , 'r' ) )
snake_case = {int(A ): v for k, v in idalabel.items()}
snake_case = idalabel
snake_case = {v: k for k, v in idalabel.items()}
snake_case = snake_case = CvtConfig(num_labels=A , idalabel=A , labelaid=A )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13":
snake_case = [1, 2, 1_0]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21":
snake_case = [1, 4, 1_6]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
else:
snake_case = [2, 2, 2_0]
snake_case = [3, 1_2, 1_6]
snake_case = [1_9_2, 7_6_8, 1_0_2_4]
snake_case = CvtForImageClassification(A )
snake_case = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
snake_case = image_size
snake_case = torch.load(A , map_location=torch.device('cpu' ) )
snake_case = OrderedDict()
snake_case = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
snake_case = list_of_state_dict + cls_token(A )
snake_case = list_of_state_dict + embeddings(A )
for cnt in range(config.depth[idx] ):
snake_case = list_of_state_dict + attention(A , A )
snake_case = list_of_state_dict + final()
for gg in list_of_state_dict:
print(A )
for i in range(len(A ) ):
snake_case = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(A )
model.save_pretrained(A )
image_processor.save_pretrained(A )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=3_8_4,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
lowerCAmelCase_ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 332 | 0 |
'''simple docstring'''
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def __magic_name__ ( A = 8 ) -> Union[str, Any]:
snake_case = ascii_letters + digits + punctuation
return "".join(secrets.choice(_a ) for _ in range(_a ) )
def __magic_name__ ( A , A ) -> Tuple:
# Password Generator = full boot with random_number, random_letters, and
# random_character FUNCTIONS
# Put your code here...
i -= len(_a )
snake_case = i // 3
snake_case = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
snake_case = (
chars_incl
+ random(_a , quotient + remainder )
+ random(_a , _a )
+ random(_a , _a )
)
snake_case = list(_a )
shuffle(_a )
return "".join(_a )
# random is a generalised function for letters, characters and numbers
def __magic_name__ ( A , A ) -> List[str]:
return "".join(secrets.choice(_a ) for _ in range(_a ) )
def __magic_name__ ( A , A ) -> Tuple:
pass # Put your code here...
def __magic_name__ ( A , A ) -> Optional[int]:
pass # Put your code here...
def __magic_name__ ( A , A ) -> Any:
pass # Put your code here...
def __magic_name__ ( A , A = 8 ) -> Any:
if len(_a ) < min_length:
# Your Password must be at least 8 characters long
return False
snake_case = any(char in ascii_uppercase for char in password )
snake_case = any(char in ascii_lowercase for char in password )
snake_case = any(char in digits for char in password )
snake_case = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowerase
# numbers, and special characters
def __magic_name__ ( ) -> Union[str, Any]:
snake_case = int(input('Please indicate the max length of your password: ' ).strip() )
snake_case = input(
'Please indicate the characters that must be in your password: ' ).strip()
print('Password generated:' , password_generator(_a ) )
print(
'Alternative Password generated:' , alternative_password_generator(_a , _a ) , )
print('[If you are thinking of using this passsword, You better save it.]' )
if __name__ == "__main__":
main()
| 364 |
'''simple docstring'''
from pathlib import Path
import fire
def __magic_name__ ( A , A , A ) -> Union[str, Any]:
snake_case = Path(A )
snake_case = Path(A )
dest_dir.mkdir(exist_ok=A )
for path in src_dir.iterdir():
snake_case = [x.rstrip() for x in list(path.open().readlines() )][:n]
snake_case = dest_dir.joinpath(path.name )
print(A )
dest_path.open('w' ).write('\n'.join(A ) )
if __name__ == "__main__":
fire.Fire(minify)
| 332 | 0 |
'''simple docstring'''
import os
from math import logaa
def __magic_name__ ( A = "base_exp.txt" ) -> Union[str, Any]:
snake_case = 0
snake_case = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(_UpperCAmelCase ) , _UpperCAmelCase ) ) ):
snake_case = list(map(_UpperCAmelCase , line.split(',' ) ) )
if x * logaa(_UpperCAmelCase ) > largest:
snake_case = x * logaa(_UpperCAmelCase )
snake_case = i + 1
return result
if __name__ == "__main__":
print(solution())
| 365 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
lowerCAmelCase_ = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def __magic_name__ ( A , A ) -> Union[str, Any]:
inspect_dataset(A , A )
snake_case = path + '.py'
assert script_name in os.listdir(A )
assert "__pycache__" not in os.listdir(A )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def __magic_name__ ( A , A ) -> int:
inspect_metric(A , A )
snake_case = path + '.py'
assert script_name in os.listdir(A )
assert "__pycache__" not in os.listdir(A )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def __magic_name__ ( A , A , A ) -> List[str]:
snake_case = get_dataset_config_info(A , config_name=A )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def __magic_name__ ( A , A , A ) -> Any:
with pytest.raises(A ):
get_dataset_config_info(A , config_name=A )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def __magic_name__ ( A , A ) -> Dict:
snake_case = get_dataset_config_names(A )
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def __magic_name__ ( A , A , A ) -> List[str]:
snake_case = get_dataset_infos(A )
assert list(infos.keys() ) == expected_configs
snake_case = expected_configs[0]
assert expected_config in infos
snake_case = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def __magic_name__ ( A , A , A ) -> Any:
snake_case = get_dataset_infos(A )
assert expected_config in infos
snake_case = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def __magic_name__ ( A , A , A ) -> int:
with pytest.raises(A ):
get_dataset_split_names(A , config_name=A )
| 332 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase ( lowerCamelCase_ ):
def __init__( self, *lowercase_, **lowercase_ ) -> None:
warnings.warn(
'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use LayoutLMv2ImageProcessor instead.', lowerCAmelCase__, )
super().__init__(*lowerCAmelCase__, **lowerCAmelCase__ )
| 366 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 332 | 0 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowerCAmelCase_ = False
class lowerCamelCase ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained('shi-labs/versatile-diffusion' )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
snake_case = 'A painting of a squirrel eating a burger '
snake_case = torch.manual_seed(0 )
snake_case = pipe(
prompt=__lowerCamelCase, generator=__lowerCamelCase, guidance_scale=7.5, num_inference_steps=2, output_type='numpy' ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__lowerCamelCase )
snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained(__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
snake_case = generator.manual_seed(0 )
snake_case = pipe(
prompt=__lowerCamelCase, generator=__lowerCamelCase, guidance_scale=7.5, num_inference_steps=2, output_type='numpy' ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def _lowerCamelCase ( self ) -> int:
snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained(
'shi-labs/versatile-diffusion', torch_dtype=torch.floataa )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
snake_case = 'A painting of a squirrel eating a burger '
snake_case = torch.manual_seed(0 )
snake_case = pipe(
prompt=__lowerCamelCase, generator=__lowerCamelCase, guidance_scale=7.5, num_inference_steps=50, output_type='numpy' ).images
snake_case = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
snake_case = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 367 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowerCAmelCase_ = False
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowerCamelCase ( self ) -> List[Any]:
return 12
@property
def _lowerCamelCase ( self ) -> Dict:
return 12
@property
def _lowerCamelCase ( self ) -> List[Any]:
return 32
@property
def _lowerCamelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
snake_case = VQModel(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3, )
return model
@property
def _lowerCamelCase ( self ) -> List[Any]:
snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _lowerCamelCase ( self ) -> Tuple:
torch.manual_seed(0 )
snake_case = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
return CLIPTextModel(lowercase_ )
@property
def _lowerCamelCase ( self ) -> str:
torch.manual_seed(0 )
snake_case = 12
snake_case = 12
snake_case = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
snake_case = TransformeraDModel(**lowercase_ )
return model
def _lowerCamelCase ( self ) -> Tuple:
snake_case = 'cpu'
snake_case = self.dummy_vqvae
snake_case = self.dummy_text_encoder
snake_case = self.dummy_tokenizer
snake_case = self.dummy_transformer
snake_case = VQDiffusionScheduler(self.num_embed )
snake_case = LearnedClassifierFreeSamplingEmbeddings(learnable=lowercase_ )
snake_case = VQDiffusionPipeline(
vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, )
snake_case = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case = 'teddy bear playing in the pool'
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' )
snake_case = output.images
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe(
[prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case = 'cpu'
snake_case = self.dummy_vqvae
snake_case = self.dummy_text_encoder
snake_case = self.dummy_tokenizer
snake_case = self.dummy_transformer
snake_case = VQDiffusionScheduler(self.num_embed )
snake_case = LearnedClassifierFreeSamplingEmbeddings(
learnable=lowercase_, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length )
snake_case = VQDiffusionPipeline(
vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, )
snake_case = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case = 'teddy bear playing in the pool'
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' )
snake_case = output.images
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe(
[prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self ) -> str:
snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
snake_case = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
snake_case = pipeline.to(lowercase_ )
pipeline.set_progress_bar_config(disable=lowercase_ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipeline(
'teddy bear playing in the pool', num_images_per_prompt=1, generator=lowercase_, output_type='np', )
snake_case = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 332 | 0 |
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> List[Any]:
snake_case = 0
snake_case = [0]
snake_case = [0]
snake_case = len(_lowerCamelCase )
self.assertEqual(k.knapsack(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase ), 0 )
snake_case = [60]
snake_case = [10]
snake_case = len(_lowerCamelCase )
self.assertEqual(k.knapsack(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase ), 0 )
def _lowerCamelCase ( self ) -> Union[str, Any]:
snake_case = 3
snake_case = [1, 2, 3]
snake_case = [3, 2, 1]
snake_case = len(_lowerCamelCase )
self.assertEqual(k.knapsack(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase ), 5 )
def _lowerCamelCase ( self ) -> str:
snake_case = 50
snake_case = [60, 100, 120]
snake_case = [10, 20, 30]
snake_case = len(_lowerCamelCase )
self.assertEqual(k.knapsack(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase ), 220 )
if __name__ == "__main__":
unittest.main()
| 368 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase ( metaclass=__lowerCAmelCase ):
snake_case_ = ['''note_seq''']
def __init__( self, *lowercase_, **lowercase_ ) -> str:
requires_backends(self, ['note_seq'] )
@classmethod
def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
requires_backends(cls, ['note_seq'] )
@classmethod
def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
requires_backends(cls, ['note_seq'] )
| 332 | 0 |
'''simple docstring'''
from __future__ import annotations
def __magic_name__ ( A , A , A , ) -> tuple[str, float]:
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 369 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase ( __lowerCAmelCase ):
def __init__( self, *lowercase_, **lowercase_ ) -> None:
warnings.warn(
'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DPTImageProcessor instead.', lowercase_, )
super().__init__(*lowercase_, **lowercase_ )
| 332 | 0 |
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
lowerCAmelCase_ = ["small", "medium", "large"]
lowerCAmelCase_ = "lm_head.decoder.weight"
lowerCAmelCase_ = "lm_head.weight"
def __magic_name__ ( A , A ) -> Tuple:
snake_case = torch.load(A )
snake_case = d.pop(A )
os.makedirs(A , exist_ok=A )
torch.save(A , os.path.join(A , A ) )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("--dialogpt_path", default=".", type=str)
lowerCAmelCase_ = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
lowerCAmelCase_ = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
lowerCAmelCase_ = f"./DialoGPT-{MODEL}"
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 370 |
'''simple docstring'''
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
lowerCAmelCase_ = pd.read_csv(
"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
"position_salaries.csv"
)
lowerCAmelCase_ = dataset.iloc[:, 1:2].values
lowerCAmelCase_ = dataset.iloc[:, 2].values
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = train_test_split(X, y, test_size=0.2, random_state=0)
lowerCAmelCase_ = PolynomialFeatures(degree=4)
lowerCAmelCase_ = poly_reg.fit_transform(X)
lowerCAmelCase_ = LinearRegression()
pol_reg.fit(X_poly, y)
def __magic_name__ ( ) -> Any:
plt.scatter(A , A , color='red' )
plt.plot(A , pol_reg.predict(poly_reg.fit_transform(A ) ) , color='blue' )
plt.title('Truth or Bluff (Linear Regression)' )
plt.xlabel('Position level' )
plt.ylabel('Salary' )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 332 | 0 |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
lowerCAmelCase_ = 5_0_0_0_0
lowerCAmelCase_ = 5_0_0_0
lowerCAmelCase_ , lowerCAmelCase_ = os.path.split(__file__)
lowerCAmelCase_ = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def __magic_name__ ( A , A ) -> Any:
for i in range(_lowerCAmelCase ):
snake_case = dataset[i]
@get_duration
def __magic_name__ ( A , A , A ) -> Union[str, Any]:
for i in range(0 , len(_lowerCAmelCase ) , _lowerCAmelCase ):
snake_case = dataset[i : i + batch_size]
@get_duration
def __magic_name__ ( A , A , A ) -> Dict:
with dataset.formatted_as(type=_lowerCAmelCase ):
for i in range(_lowerCAmelCase ):
snake_case = dataset[i]
@get_duration
def __magic_name__ ( A , A , A , A ) -> List[Any]:
with dataset.formatted_as(type=_lowerCAmelCase ):
for i in range(0 , _lowerCAmelCase , _lowerCAmelCase ):
snake_case = dataset[i : i + batch_size]
def __magic_name__ ( ) -> Optional[int]:
snake_case = {"""num examples""": SPEED_TEST_N_EXAMPLES}
snake_case = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_0}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_0_0}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_0_0_0}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """pandas""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """torch""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """tensorflow""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1_0}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1_0_0_0}),
]
snake_case = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_0}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_0_0}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_0_0_0}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1_0}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1_0_0_0}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print('generating dataset' )
snake_case = datasets.Features(
{'list': datasets.Sequence(datasets.Value('float32' ) ), 'numbers': datasets.Value('float32' )} )
snake_case = generate_example_dataset(
os.path.join(_lowerCAmelCase , 'dataset.arrow' ) , _lowerCAmelCase , num_examples=_lowerCAmelCase , seq_shapes={'list': (1_0_0,)} , )
print('first set of iterations' )
for func, kwargs in functions:
print(func.__name__ , str(_lowerCAmelCase ) )
snake_case = func(_lowerCAmelCase , **_lowerCAmelCase )
print('shuffling dataset' )
snake_case = dataset.shuffle()
print('Second set of iterations (after shuffling' )
for func, kwargs in functions_shuffled:
print('shuffled ' , func.__name__ , str(_lowerCAmelCase ) )
snake_case = func(
_lowerCAmelCase , **_lowerCAmelCase )
with open(_lowerCAmelCase , 'wb' ) as f:
f.write(json.dumps(_lowerCAmelCase ).encode('utf-8' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 371 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = ''''''
snake_case_ = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
snake_case_ = None # compression type in fsspec. ex: "gzip"
snake_case_ = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self, lowercase_ = "", lowercase_ = None, lowercase_ = None, **lowercase_ ) -> str:
super().__init__(self, **lowercase_ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
snake_case = fsspec.open(
lowercase_, mode='rb', protocol=lowercase_, compression=self.compression, client_kwargs={
'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459
'trust_env': True, # Enable reading proxy env variables.
**(target_options or {}).pop('client_kwargs', {} ), # To avoid issues if it was already passed.
}, **(target_options or {}), )
snake_case = os.path.basename(self.file.path.split('::' )[0] )
snake_case = (
self.compressed_name[: self.compressed_name.rindex('.' )]
if '.' in self.compressed_name
else self.compressed_name
)
snake_case = None
@classmethod
def _lowerCamelCase ( cls, lowercase_ ) -> Any:
# compressed file paths are always relative to the archive root
return super()._strip_protocol(lowercase_ ).lstrip('/' )
def _lowerCamelCase ( self ) -> Optional[Any]:
if self.dir_cache is None:
snake_case = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name}
snake_case = {f['name']: f}
def _lowerCamelCase ( self, lowercase_ ) -> str:
return self.file.open().read()
def _lowerCamelCase ( self, lowercase_, lowercase_ = "rb", lowercase_=None, lowercase_=True, lowercase_=None, **lowercase_, ) -> Any:
snake_case = self._strip_protocol(lowercase_ )
if mode != "rb":
raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
return self.file.open()
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''bz2'''
snake_case_ = '''bz2'''
snake_case_ = '''.bz2'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''gzip'''
snake_case_ = '''gzip'''
snake_case_ = '''.gz'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''lz4'''
snake_case_ = '''lz4'''
snake_case_ = '''.lz4'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''xz'''
snake_case_ = '''xz'''
snake_case_ = '''.xz'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''zstd'''
snake_case_ = '''zstd'''
snake_case_ = '''.zst'''
def __init__( self, lowercase_, lowercase_ = "rb", lowercase_ = None, lowercase_ = None, lowercase_ = DEFAULT_BLOCK_SIZE, **lowercase_, ) -> Union[str, Any]:
super().__init__(
fo=lowercase_, mode=lowercase_, target_protocol=lowercase_, target_options=lowercase_, block_size=lowercase_, **lowercase_, )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
snake_case = self.file.__enter__
class lowerCamelCase :
def __init__( self, lowercase_ ) -> List[Any]:
snake_case = file_
def __enter__( self ) -> Dict:
self._file.__enter__()
return self
def __exit__( self, *lowercase_, **lowercase_ ) -> Dict:
self._file.__exit__(*lowercase_, **lowercase_ )
def __iter__( self ) -> List[str]:
return iter(self._file )
def _lowerCamelCase ( self ) -> List[str]:
return next(self._file )
def __getattr__( self, lowercase_ ) -> List[Any]:
return getattr(self._file, lowercase_ )
def fixed_enter(*lowercase_, **lowercase_ ):
return WrappedFile(_enter(*lowercase_, **lowercase_ ) )
snake_case = fixed_enter
| 332 | 0 |
'''simple docstring'''
def __magic_name__ ( A ) -> str:
snake_case = len(__lowerCAmelCase )
while cur > 1:
# Find the maximum number in arr
snake_case = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
snake_case = arr[mi::-1] + arr[mi + 1 : len(__lowerCAmelCase )]
# Reverse whole list
snake_case = arr[cur - 1 :: -1] + arr[cur : len(__lowerCAmelCase )]
cur -= 1
return arr
if __name__ == "__main__":
lowerCAmelCase_ = input("Enter numbers separated by a comma:\n").strip()
lowerCAmelCase_ = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
| 350 |
'''simple docstring'''
from __future__ import annotations
def __magic_name__ ( A , A , A ) -> int | float:
if len(A ) == 0:
raise ValueError('find_max() arg is an empty sequence' )
if (
left >= len(A )
or left < -len(A )
or right >= len(A )
or right < -len(A )
):
raise IndexError('list index out of range' )
if left == right:
return nums[left]
snake_case = (left + right) >> 1 # the middle
snake_case = find_max(A , A , A ) # find max in range[left, mid]
snake_case = find_max(A , mid + 1 , A ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 332 | 0 |
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def __magic_name__ ( A , A , A , A , ) -> tuple[float | int, list[tuple[int, int]]]:
snake_case = grid.shape
snake_case = [-1, 1, 0, 0]
snake_case = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
snake_case = [(0, source)], set()
snake_case = np.full((rows, cols) , np.inf )
snake_case = 0
snake_case = np.empty((rows, cols) , dtype=_UpperCAmelCase )
snake_case = None
while queue:
(snake_case) = heappop(_UpperCAmelCase )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
snake_case = []
while (x, y) != source:
path.append((x, y) )
snake_case = predecessors[x, y]
path.append(_UpperCAmelCase ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(_UpperCAmelCase ) ):
snake_case = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
snake_case = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(_UpperCAmelCase , (dist + 1, (nx, ny)) )
snake_case = dist + 1
snake_case = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
| 351 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = 42
class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase ):
@register_to_config
def __init__( self, lowercase_ = 3, lowercase_ = 3, lowercase_ = ("DownEncoderBlock2D",), lowercase_ = ("UpDecoderBlock2D",), lowercase_ = (64,), lowercase_ = 1, lowercase_ = "silu", lowercase_ = 3, lowercase_ = 32, lowercase_ = 256, lowercase_ = 32, lowercase_ = None, lowercase_ = 0.18_215, lowercase_ = "group", ) -> str:
super().__init__()
# pass init params to Encoder
snake_case = Encoder(
in_channels=lowercase_, out_channels=lowercase_, down_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, double_z=lowercase_, )
snake_case = vq_embed_dim if vq_embed_dim is not None else latent_channels
snake_case = nn.Convad(lowercase_, lowercase_, 1 )
snake_case = VectorQuantizer(lowercase_, lowercase_, beta=0.25, remap=lowercase_, sane_index_shape=lowercase_ )
snake_case = nn.Convad(lowercase_, lowercase_, 1 )
# pass init params to Decoder
snake_case = Decoder(
in_channels=lowercase_, out_channels=lowercase_, up_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, norm_type=lowercase_, )
@apply_forward_hook
def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> VQEncoderOutput:
snake_case = self.encoder(lowercase_ )
snake_case = self.quant_conv(lowercase_ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowercase_ )
@apply_forward_hook
def _lowerCamelCase ( self, lowercase_, lowercase_ = False, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
# also go through quantization layer
if not force_not_quantize:
snake_case , snake_case , snake_case = self.quantize(lowercase_ )
else:
snake_case = h
snake_case = self.post_quant_conv(lowercase_ )
snake_case = self.decoder(lowercase_, quant if self.config.norm_type == 'spatial' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
snake_case = sample
snake_case = self.encode(lowercase_ ).latents
snake_case = self.decode(lowercase_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase_ )
| 332 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class lowerCamelCase ( lowerCAmelCase_ ):
snake_case_ = """data2vec-text"""
def __init__( self, lowercase_=30522, lowercase_=768, lowercase_=12, lowercase_=12, lowercase_=3072, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=2, lowercase_=0.02, lowercase_=1E-12, lowercase_=1, lowercase_=0, lowercase_=2, lowercase_="absolute", lowercase_=True, lowercase_=None, **lowercase_, ) -> Union[str, Any]:
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE, bos_token_id=__SCREAMING_SNAKE_CASE, eos_token_id=__SCREAMING_SNAKE_CASE, **__SCREAMING_SNAKE_CASE )
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = hidden_act
snake_case = intermediate_size
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = type_vocab_size
snake_case = initializer_range
snake_case = layer_norm_eps
snake_case = position_embedding_type
snake_case = use_cache
snake_case = classifier_dropout
class lowerCamelCase ( lowerCAmelCase_ ):
@property
def _lowerCamelCase ( self ) -> Union[str, Any]:
if self.task == "multiple-choice":
snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
snake_case = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 352 |
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def __magic_name__ ( A = 2_0_0_0_0_0_0 ) -> int:
snake_case = [0]
snake_case = 42
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
snake_case = 0
# the area corresponding to the grid that gives the product closest to target
snake_case = 0
# an estimate of b, using the quadratic formula
snake_case = 42
# the largest integer less than b_estimate
snake_case = 42
# the largest integer less than b_estimate
snake_case = 42
# the triangle number corresponding to b_floor
snake_case = 42
# the triangle number corresponding to b_ceil
snake_case = 42
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
snake_case = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
snake_case = floor(A )
snake_case = ceil(A )
snake_case = triangle_numbers[b_floor]
snake_case = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
snake_case = triangle_b_first_guess * triangle_a
snake_case = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
snake_case = triangle_b_second_guess * triangle_a
snake_case = idx_a * b_ceil
return area
if __name__ == "__main__":
print(f"{solution() = }")
| 332 | 0 |
'''simple docstring'''
def __magic_name__ ( UpperCamelCase_ , UpperCamelCase_ = 0 ) -> int:
snake_case = length or len(lowerCamelCase__ )
snake_case = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
snake_case , snake_case = list_data[i + 1], list_data[i]
snake_case = True
return list_data if not swapped else bubble_sort(lowerCamelCase__ , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 353 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 332 | 0 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
# See all BART models at https://huggingface.co/models?filter=bart
}
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''bart'''
snake_case_ = ['''past_key_values''']
snake_case_ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self, lowercase_=50265, lowercase_=1024, lowercase_=12, lowercase_=4096, lowercase_=16, lowercase_=12, lowercase_=4096, lowercase_=16, lowercase_=0.0, lowercase_=0.0, lowercase_="gelu", lowercase_=1024, lowercase_=0.1, lowercase_=0.0, lowercase_=0.0, lowercase_=0.02, lowercase_=0.0, lowercase_=False, lowercase_=True, lowercase_=3, lowercase_=1, lowercase_=0, lowercase_=2, lowercase_=True, lowercase_=2, lowercase_=2, **lowercase_, ) -> Optional[Any]:
snake_case = vocab_size
snake_case = max_position_embeddings
snake_case = d_model
snake_case = encoder_ffn_dim
snake_case = encoder_layers
snake_case = encoder_attention_heads
snake_case = decoder_ffn_dim
snake_case = decoder_layers
snake_case = decoder_attention_heads
snake_case = dropout
snake_case = attention_dropout
snake_case = activation_dropout
snake_case = activation_function
snake_case = init_std
snake_case = encoder_layerdrop
snake_case = decoder_layerdrop
snake_case = classifier_dropout
snake_case = use_cache
snake_case = encoder_layers
snake_case = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=_lowerCAmelCase, pad_token_id=_lowerCAmelCase, bos_token_id=_lowerCAmelCase, eos_token_id=_lowerCAmelCase, is_encoder_decoder=_lowerCAmelCase, decoder_start_token_id=_lowerCAmelCase, forced_eos_token_id=_lowerCAmelCase, **_lowerCAmelCase, )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated', _lowerCAmelCase ):
snake_case = self.bos_token_id
warnings.warn(
F'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
'The config can simply be saved and uploaded again to be fixed.' )
class lowerCamelCase ( __lowerCAmelCase ):
@property
def _lowerCamelCase ( self ) -> str:
if self.task in ["default", "seq2seq-lm"]:
snake_case = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
snake_case = {0: 'batch'}
snake_case = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
snake_case = {0: 'batch', 1: 'decoder_sequence'}
snake_case = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(_lowerCAmelCase, direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
snake_case = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
snake_case , snake_case = self.num_layers
for i in range(_lowerCAmelCase ):
snake_case = {0: 'batch', 2: 'past_sequence + sequence'}
snake_case = {0: 'batch', 2: 'past_sequence + sequence'}
else:
snake_case = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def _lowerCamelCase ( self ) -> Optional[int]:
if self.task in ["default", "seq2seq-lm"]:
snake_case = super().outputs
else:
snake_case = super(_lowerCAmelCase, self ).outputs
if self.use_past:
snake_case , snake_case = self.num_layers
for i in range(_lowerCAmelCase ):
snake_case = {0: 'batch', 2: 'past_sequence + sequence'}
snake_case = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def _lowerCamelCase ( self, lowercase_, lowercase_ = -1, lowercase_ = -1, lowercase_ = False, lowercase_ = None, ) -> Optional[Any]:
snake_case = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
# Generate decoder inputs
snake_case = seq_length if not self.use_past else 1
snake_case = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
snake_case = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
snake_case = dict(**_lowerCAmelCase, **_lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
snake_case , snake_case = common_inputs['input_ids'].shape
snake_case = common_inputs['decoder_input_ids'].shape[1]
snake_case , snake_case = self.num_attention_heads
snake_case = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
snake_case = decoder_seq_length + 3
snake_case = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
snake_case = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(_lowerCAmelCase, _lowerCAmelCase )], dim=1 )
snake_case = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
snake_case , snake_case = self.num_layers
snake_case = min(_lowerCAmelCase, _lowerCAmelCase )
snake_case = max(_lowerCAmelCase, _lowerCAmelCase ) - min_num_layers
snake_case = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(_lowerCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(_lowerCAmelCase ),
torch.zeros(_lowerCAmelCase ),
torch.zeros(_lowerCAmelCase ),
torch.zeros(_lowerCAmelCase ),
) )
# TODO: test this.
snake_case = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(_lowerCAmelCase, _lowerCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) )
return common_inputs
def _lowerCamelCase ( self, lowercase_, lowercase_ = -1, lowercase_ = -1, lowercase_ = False, lowercase_ = None, ) -> str:
snake_case = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
snake_case , snake_case = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
snake_case = seqlen + 2
snake_case , snake_case = self.num_layers
snake_case , snake_case = self.num_attention_heads
snake_case = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
snake_case = common_inputs['attention_mask'].dtype
snake_case = torch.cat(
[common_inputs['attention_mask'], torch.ones(_lowerCAmelCase, _lowerCAmelCase, dtype=_lowerCAmelCase )], dim=1 )
snake_case = [
(torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) for _ in range(_lowerCAmelCase )
]
return common_inputs
def _lowerCamelCase ( self, lowercase_, lowercase_ = -1, lowercase_ = -1, lowercase_ = False, lowercase_ = None, ) -> List[str]:
snake_case = compute_effective_axis_dimension(
_lowerCAmelCase, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
snake_case = tokenizer.num_special_tokens_to_add(_lowerCAmelCase )
snake_case = compute_effective_axis_dimension(
_lowerCAmelCase, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=_lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
snake_case = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
snake_case = dict(tokenizer(_lowerCAmelCase, return_tensors=_lowerCAmelCase ) )
return common_inputs
def _lowerCamelCase ( self, lowercase_, lowercase_ = -1, lowercase_ = -1, lowercase_ = False, lowercase_ = None, ) -> str:
if self.task in ["default", "seq2seq-lm"]:
snake_case = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_lowerCAmelCase, batch_size=_lowerCAmelCase, seq_length=_lowerCAmelCase, is_pair=_lowerCAmelCase, framework=_lowerCAmelCase )
elif self.task == "causal-lm":
snake_case = self._generate_dummy_inputs_for_causal_lm(
_lowerCAmelCase, batch_size=_lowerCAmelCase, seq_length=_lowerCAmelCase, is_pair=_lowerCAmelCase, framework=_lowerCAmelCase )
else:
snake_case = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase, batch_size=_lowerCAmelCase, seq_length=_lowerCAmelCase, is_pair=_lowerCAmelCase, framework=_lowerCAmelCase )
return common_inputs
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_ ) -> Union[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
snake_case = super()._flatten_past_key_values_(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
else:
snake_case = super(_lowerCAmelCase, self )._flatten_past_key_values_(
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
| 354 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
lowerCAmelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCamelCase :
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(__lowerCAmelCase )} )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} )
snake_case_ = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
snake_case_ = field(
default=128 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , )
snake_case_ = field(
default=64 , metadata={
'''help''': (
'''The maximum number of tokens for the question. Questions longer than this will '''
'''be truncated to this length.'''
)
} , )
snake_case_ = field(
default=30 , metadata={
'''help''': (
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
)
} , )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} )
snake_case_ = field(
default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
snake_case_ = field(
default=20 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
snake_case_ = field(
default=0 , metadata={
'''help''': (
'''language id of input for language-specific xlm models (see'''
''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'''
)
} , )
snake_case_ = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} )
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''train'''
snake_case_ = '''dev'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
def __init__( self, lowercase_, lowercase_, lowercase_ = None, lowercase_ = Split.train, lowercase_ = False, lowercase_ = None, lowercase_ = "pt", ) -> int:
snake_case = args
snake_case = is_language_sensitive
snake_case = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(lowercase_, lowercase_ ):
try:
snake_case = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
snake_case = mode
# Load data features from cache or dataset file
snake_case = 'v2' if args.version_2_with_negative else 'v1'
snake_case = os.path.join(
cache_dir if cache_dir is not None else args.data_dir, F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
snake_case = cached_features_file + '.lock'
with FileLock(lowercase_ ):
if os.path.exists(lowercase_ ) and not args.overwrite_cache:
snake_case = time.time()
snake_case = torch.load(lowercase_ )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
snake_case = self.old_features['features']
snake_case = self.old_features.get('dataset', lowercase_ )
snake_case = self.old_features.get('examples', lowercase_ )
logger.info(
F'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
' future run' )
else:
if mode == Split.dev:
snake_case = self.processor.get_dev_examples(args.data_dir )
else:
snake_case = self.processor.get_train_examples(args.data_dir )
snake_case , snake_case = squad_convert_examples_to_features(
examples=self.examples, tokenizer=lowercase_, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowercase_, )
snake_case = time.time()
torch.save(
{'features': self.features, 'dataset': self.dataset, 'examples': self.examples}, lowercase_, )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ) -> Tuple:
return len(self.features )
def __getitem__( self, lowercase_ ) -> Dict[str, torch.Tensor]:
# Convert to Tensors and build dataset
snake_case = self.features[i]
snake_case = torch.tensor(feature.input_ids, dtype=torch.long )
snake_case = torch.tensor(feature.attention_mask, dtype=torch.long )
snake_case = torch.tensor(feature.token_type_ids, dtype=torch.long )
snake_case = torch.tensor(feature.cls_index, dtype=torch.long )
snake_case = torch.tensor(feature.p_mask, dtype=torch.float )
snake_case = torch.tensor(feature.is_impossible, dtype=torch.float )
snake_case = {
'input_ids': input_ids,
'attention_mask': attention_mask,
'token_type_ids': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'cls_index': cls_index, 'p_mask': p_mask} )
if self.args.version_2_with_negative:
inputs.update({'is_impossible': is_impossible} )
if self.is_language_sensitive:
inputs.update({'langs': (torch.ones(input_ids.shape, dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
snake_case = torch.tensor(feature.start_position, dtype=torch.long )
snake_case = torch.tensor(feature.end_position, dtype=torch.long )
inputs.update({'start_positions': start_positions, 'end_positions': end_positions} )
return inputs
| 332 | 0 |
from __future__ import annotations
class lowerCamelCase :
def __init__( self, lowercase_ ) -> None:
snake_case = data
snake_case = None
snake_case = None
def __magic_name__ ( A ) -> None: # In Order traversal of the tree
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def __magic_name__ ( A ) -> int:
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def __magic_name__ ( A ) -> bool:
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def __magic_name__ ( ) -> None: # Main function for testing.
snake_case = Node(1 )
snake_case = Node(2 )
snake_case = Node(3 )
snake_case = Node(4 )
snake_case = Node(5 )
snake_case = Node(6 )
snake_case = Node(7 )
snake_case = Node(8 )
snake_case = Node(9 )
print(is_full_binary_tree(_A ) )
print(depth_of_tree(_A ) )
print('Tree is: ' )
display(_A )
if __name__ == "__main__":
main()
| 355 |
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def __magic_name__ ( A , A , A ) -> Any:
# Initialise PyTorch model
snake_case = BertConfig.from_json_file(A )
print(F'''Building PyTorch model from configuration: {config}''' )
snake_case = BertForPreTraining(A )
# Load weights from tf checkpoint
load_tf_weights_in_bert(A , A , A )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , A )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCAmelCase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 332 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
lowerCAmelCase_ = None
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = "▁"
lowerCAmelCase_ = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
lowerCAmelCase_ = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
"tokenizer_file": {
"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
},
}
lowerCAmelCase_ = {
"google/pegasus-xsum": 5_1_2,
}
class lowerCamelCase ( snake_case_ ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = PegasusTokenizer
snake_case_ = ['''input_ids''', '''attention_mask''']
def __init__( self, lowercase_=None, lowercase_=None, lowercase_="<pad>", lowercase_="</s>", lowercase_="<unk>", lowercase_="<mask_2>", lowercase_="<mask_1>", lowercase_=None, lowercase_=103, **lowercase_, ) -> Optional[int]:
snake_case = offset
if additional_special_tokens is not None:
if not isinstance(lowercase_, lowercase_ ):
raise TypeError(
F'''additional_special_tokens should be of type {type(lowercase_ )}, but is'''
F''' {type(lowercase_ )}''' )
snake_case = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F'''<unk_{i}>''' for i in range(len(lowercase_ ), self.offset - 1 )
]
if len(set(lowercase_ ) ) != len(lowercase_ ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
F''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
snake_case = additional_special_tokens_extended
else:
snake_case = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F'''<unk_{i}>''' for i in range(2, self.offset )]
super().__init__(
lowercase_, tokenizer_file=lowercase_, pad_token=lowercase_, eos_token=lowercase_, unk_token=lowercase_, mask_token=lowercase_, mask_token_sent=lowercase_, offset=lowercase_, additional_special_tokens=lowercase_, **lowercase_, )
snake_case = vocab_file
snake_case = False if not self.vocab_file else True
def _lowerCamelCase ( self, lowercase_ ) -> Optional[Any]:
snake_case = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
F''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def _lowerCamelCase ( self, lowercase_, lowercase_ = None, lowercase_ = False ) -> List[int]:
if already_has_special_tokens:
return self._special_token_mask(lowercase_ )
elif token_ids_a is None:
return self._special_token_mask(lowercase_ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def _lowerCamelCase ( self, lowercase_, lowercase_=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(lowercase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case = os.path.join(
lowercase_, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ):
copyfile(self.vocab_file, lowercase_ )
return (out_vocab_file,)
| 356 |
'''simple docstring'''
from __future__ import annotations
def __magic_name__ ( A ) -> list:
if len(A ) == 0:
return []
snake_case , snake_case = min(A ), max(A )
snake_case = int(max_value - min_value ) + 1
snake_case = [[] for _ in range(A )]
for i in my_list:
buckets[int(i - min_value )].append(A )
return [v for bucket in buckets for v in sorted(A )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
| 332 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {
"configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RemBertForCausalLM",
"RemBertForMaskedLM",
"RemBertForMultipleChoice",
"RemBertForQuestionAnswering",
"RemBertForSequenceClassification",
"RemBertForTokenClassification",
"RemBertLayer",
"RemBertModel",
"RemBertPreTrainedModel",
"load_tf_weights_in_rembert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRemBertForCausalLM",
"TFRemBertForMaskedLM",
"TFRemBertForMultipleChoice",
"TFRemBertForQuestionAnswering",
"TFRemBertForSequenceClassification",
"TFRemBertForTokenClassification",
"TFRemBertLayer",
"TFRemBertModel",
"TFRemBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 357 |
'''simple docstring'''
def __magic_name__ ( A ) -> float:
return 1_0 - x * x
def __magic_name__ ( A , A ) -> float:
# Bolzano theory in order to find if there is a root between a and b
if equation(A ) * equation(A ) >= 0:
raise ValueError('Wrong space!' )
snake_case = a
while (b - a) >= 0.01:
# Find middle point
snake_case = (a + b) / 2
# Check if middle point is root
if equation(A ) == 0.0:
break
# Decide the side to repeat the steps
if equation(A ) * equation(A ) < 0:
snake_case = c
else:
snake_case = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 332 | 0 |
'''simple docstring'''
def __magic_name__ ( A ) -> List[Any]:
return " ".join(
''.join(word[::-1] ) if len(A__ ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
| 358 |
'''simple docstring'''
import pytest
lowerCAmelCase_ = "__dummy_dataset1__"
lowerCAmelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def __magic_name__ ( ) -> List[Any]:
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def __magic_name__ ( ) -> Union[str, Any]:
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def __magic_name__ ( A , A , A ) -> Optional[int]:
snake_case = dataset_loading_script_name
snake_case = tmp_path / 'datasets' / script_name
script_dir.mkdir(parents=A )
snake_case = script_dir / F'''{script_name}.py'''
with open(A , 'w' ) as f:
f.write(A )
return str(A )
| 332 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def __magic_name__ ( A , A , A ) -> Dict:
if gpta_config_file == "":
snake_case = GPTaConfig()
else:
snake_case = GPTaConfig.from_json_file(_UpperCAmelCase )
snake_case = GPTaModel(_UpperCAmelCase )
# Load weights from numpy
load_tf_weights_in_gpta(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save pytorch-model
snake_case = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
snake_case = pytorch_dump_folder_path + '/' + CONFIG_NAME
print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(model.state_dict() , _UpperCAmelCase )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--gpt2_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
lowerCAmelCase_ = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 359 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
lowerCAmelCase_ = Lock()
def __magic_name__ ( A , A , A , A , A , A , A ) -> Any:
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 1_0 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(A )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
snake_case = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
snake_case = min(A , A )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(A )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
snake_case = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
snake_case = max(A , A )
# after all swaps are performed, send the values back to main
result_pipe[1].send(A )
def __magic_name__ ( A ) -> str:
snake_case = []
snake_case = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
snake_case = Pipe()
snake_case = Pipe()
process_array_.append(
Process(
target=A , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
snake_case = temp_rs
snake_case = temp_rr
for i in range(1 , len(A ) - 1 ):
snake_case = Pipe()
snake_case = Pipe()
process_array_.append(
Process(
target=A , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
snake_case = temp_rs
snake_case = temp_rr
process_array_.append(
Process(
target=A , args=(
len(A ) - 1,
arr[len(A ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(A ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(A ) ):
snake_case = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def __magic_name__ ( ) -> Tuple:
snake_case = list(range(1_0 , 0 , -1 ) )
print('Initial List' )
print(*A )
snake_case = odd_even_transposition(A )
print('Sorted List\n' )
print(*A )
if __name__ == "__main__":
main()
| 332 | 0 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
lowerCAmelCase_ = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
lowerCAmelCase_ = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
raise Exception("requires fairseq >= 0.9.0")
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = " Hello world! cécé herlolip"
lowerCAmelCase_ = [
("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]
def __magic_name__ ( A ) -> Tuple:
snake_case = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'_float_tensor',
]
for k in ignore_keys:
state_dict.pop(a__ , a__ )
def __magic_name__ ( A , A , A ) -> Any:
snake_case = dct.pop(a__ )
snake_case = val
def __magic_name__ ( A ) -> Any:
snake_case = torch.load(a__ , map_location='cpu' )
snake_case = torch.hub.load('pytorch/fairseq' , 'bart.large.cnn' ).eval()
hub_interface.model.load_state_dict(sd['model'] )
return hub_interface
def __magic_name__ ( A ) -> int:
snake_case , snake_case = emb.weight.shape
snake_case = nn.Linear(a__ , a__ , bias=a__ )
snake_case = emb.weight.data
return lin_layer
@torch.no_grad()
def __magic_name__ ( A , A , A=None ) -> List[str]:
if not os.path.exists(a__ ):
snake_case = torch.hub.load('pytorch/fairseq' , a__ ).eval()
else:
snake_case = load_xsum_checkpoint(a__ )
bart.model.upgrade_state_dict(bart.model.state_dict() )
if hf_checkpoint_name is None:
snake_case = checkpoint_path.replace('.' , '-' )
snake_case = BartConfig.from_pretrained(a__ )
snake_case = bart.encode(a__ ).unsqueeze(0 )
snake_case = BartTokenizer.from_pretrained(a__ ).encode(a__ , return_tensors='pt' ).unsqueeze(0 )
if not torch.eq(a__ , a__ ).all():
raise ValueError(
F'''converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}''' )
if checkpoint_path == "bart.large.mnli":
snake_case = bart.state_dict()
remove_ignore_keys_(a__ )
snake_case = state_dict['model.decoder.embed_tokens.weight']
for src, dest in mnli_rename_keys:
rename_key(a__ , a__ , a__ )
snake_case = BartForSequenceClassification(a__ ).eval()
model.load_state_dict(a__ )
snake_case = bart.predict('mnli' , a__ , return_logits=a__ )
snake_case = model(a__ )[0] # logits
else: # no classification heads to worry about
snake_case = bart.model.state_dict()
remove_ignore_keys_(a__ )
snake_case = state_dict['decoder.embed_tokens.weight']
snake_case = bart.extract_features(a__ )
if hf_checkpoint_name == "facebook/bart-large":
snake_case = BartModel(a__ ).eval()
model.load_state_dict(a__ )
snake_case = model(a__ ).model[0]
else:
snake_case = BartForConditionalGeneration(a__ ).eval() # an existing summarization ckpt
model.model.load_state_dict(a__ )
if hasattr(a__ , 'lm_head' ):
snake_case = make_linear_from_emb(model.model.shared )
snake_case = model.model(a__ )[0]
# Check results
if fairseq_output.shape != new_model_outputs.shape:
raise ValueError(
F'''`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}''' )
if (fairseq_output != new_model_outputs).any().item():
raise ValueError('Some values in `fairseq_output` are different from `new_model_outputs`' )
Path(a__ ).mkdir(exist_ok=a__ )
model.save_pretrained(a__ )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum"
)
lowerCAmelCase_ = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 360 |
'''simple docstring'''
from __future__ import annotations
def __magic_name__ ( A ) -> None:
create_state_space_tree(A , [] , 0 , [0 for i in range(len(A ) )] )
def __magic_name__ ( A , A , A , A , ) -> None:
if index == len(A ):
print(A )
return
for i in range(len(A ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
snake_case = True
create_state_space_tree(A , A , index + 1 , A )
current_sequence.pop()
snake_case = False
lowerCAmelCase_ = [3, 1, 2, 4]
generate_all_permutations(sequence)
lowerCAmelCase_ = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 332 | 0 |
'''simple docstring'''
def __magic_name__ ( A , A , A ) -> str:
return round(float(moles / volume ) * nfactor )
def __magic_name__ ( A , A , A ) -> str:
return round(float((moles * 0.0_821 * temperature) / (volume) ) )
def __magic_name__ ( A , A , A ) -> int:
return round(float((moles * 0.0_821 * temperature) / (pressure) ) )
def __magic_name__ ( A , A , A ) -> Any:
return round(float((pressure * volume) / (0.0_821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 361 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''roberta'''
def __init__( self, lowercase_=50265, lowercase_=768, lowercase_=12, lowercase_=12, lowercase_=3072, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=2, lowercase_=0.02, lowercase_=1E-12, lowercase_=1, lowercase_=0, lowercase_=2, lowercase_="absolute", lowercase_=True, lowercase_=None, **lowercase_, ) -> Tuple:
super().__init__(pad_token_id=lowercase_, bos_token_id=lowercase_, eos_token_id=lowercase_, **lowercase_ )
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = hidden_act
snake_case = intermediate_size
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = type_vocab_size
snake_case = initializer_range
snake_case = layer_norm_eps
snake_case = position_embedding_type
snake_case = use_cache
snake_case = classifier_dropout
class lowerCamelCase ( __lowerCAmelCase ):
@property
def _lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
snake_case = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 332 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ):
snake_case_ = KandinskyVaaControlnetPipeline
snake_case_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
snake_case_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
snake_case_ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
snake_case_ = False
@property
def _lowerCamelCase ( self ) -> Any:
return 32
@property
def _lowerCamelCase ( self ) -> Union[str, Any]:
return 32
@property
def _lowerCamelCase ( self ) -> List[str]:
return self.time_input_dim
@property
def _lowerCamelCase ( self ) -> str:
return self.time_input_dim * 4
@property
def _lowerCamelCase ( self ) -> Optional[Any]:
return 100
@property
def _lowerCamelCase ( self ) -> Any:
torch.manual_seed(0 )
snake_case = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
snake_case = UNetaDConditionModel(**_a )
return model
@property
def _lowerCamelCase ( self ) -> Optional[Any]:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _lowerCamelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
snake_case = VQModel(**self.dummy_movq_kwargs )
return model
def _lowerCamelCase ( self ) -> Union[str, Any]:
snake_case = self.dummy_unet
snake_case = self.dummy_movq
snake_case = DDIMScheduler(
num_train_timesteps=1000, beta_schedule='linear', beta_start=0.00_085, beta_end=0.012, clip_sample=_a, set_alpha_to_one=_a, steps_offset=1, prediction_type='epsilon', thresholding=_a, )
snake_case = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _lowerCamelCase ( self, lowercase_, lowercase_=0 ) -> Optional[Any]:
snake_case = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(_a ) ).to(_a )
snake_case = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
_a )
# create hint
snake_case = floats_tensor((1, 3, 64, 64), rng=random.Random(_a ) ).to(_a )
if str(_a ).startswith('mps' ):
snake_case = torch.manual_seed(_a )
else:
snake_case = torch.Generator(device=_a ).manual_seed(_a )
snake_case = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def _lowerCamelCase ( self ) -> str:
snake_case = """cpu"""
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**_a )
snake_case = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
snake_case = pipe(**self.get_dummy_inputs(_a ) )
snake_case = output.images
snake_case = pipe(
**self.get_dummy_inputs(_a ), return_dict=_a, )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case = np.array(
[0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> Any:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self ) -> str:
snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy' )
snake_case = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
snake_case = torch.from_numpy(np.array(_a ) ).float() / 255.0
snake_case = hint.permute(2, 0, 1 ).unsqueeze(0 )
snake_case = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.floataa )
pipe_prior.to(_a )
snake_case = KandinskyVaaControlnetPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.floataa )
snake_case = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
snake_case = """A robot, 4k photo"""
snake_case = torch.Generator(device='cuda' ).manual_seed(0 )
snake_case = pipe_prior(
_a, generator=_a, num_inference_steps=5, negative_prompt='', ).to_tuple()
snake_case = torch.Generator(device='cuda' ).manual_seed(0 )
snake_case = pipeline(
image_embeds=_a, negative_image_embeds=_a, hint=_a, generator=_a, num_inference_steps=100, output_type='np', )
snake_case = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_a, _a )
| 362 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowerCAmelCase_ = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
lowerCAmelCase_ = {
"allenai/led-base-16384": 1_6_3_8_4,
}
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = LEDTokenizer
snake_case_ = ['''input_ids''', '''attention_mask''']
def __init__( self, lowercase_=None, lowercase_=None, lowercase_=None, lowercase_="replace", lowercase_="<s>", lowercase_="</s>", lowercase_="</s>", lowercase_="<s>", lowercase_="<unk>", lowercase_="<pad>", lowercase_="<mask>", lowercase_=False, lowercase_=True, **lowercase_, ) -> int:
super().__init__(
lowercase_, lowercase_, tokenizer_file=lowercase_, errors=lowercase_, bos_token=lowercase_, eos_token=lowercase_, sep_token=lowercase_, cls_token=lowercase_, unk_token=lowercase_, pad_token=lowercase_, mask_token=lowercase_, add_prefix_space=lowercase_, trim_offsets=lowercase_, **lowercase_, )
snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space', lowercase_ ) != add_prefix_space:
snake_case = getattr(lowercase_, pre_tok_state.pop('type' ) )
snake_case = add_prefix_space
snake_case = pre_tok_class(**lowercase_ )
snake_case = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
snake_case = 'post_processor'
snake_case = getattr(self.backend_tokenizer, lowercase_, lowercase_ )
if tokenizer_component_instance:
snake_case = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
snake_case = tuple(state['sep'] )
if "cls" in state:
snake_case = tuple(state['cls'] )
snake_case = False
if state.get('add_prefix_space', lowercase_ ) != add_prefix_space:
snake_case = add_prefix_space
snake_case = True
if state.get('trim_offsets', lowercase_ ) != trim_offsets:
snake_case = trim_offsets
snake_case = True
if changes_to_apply:
snake_case = getattr(lowercase_, state.pop('type' ) )
snake_case = component_class(**lowercase_ )
setattr(self.backend_tokenizer, lowercase_, lowercase_ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def _lowerCamelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def _lowerCamelCase ( self, lowercase_ ) -> Any:
snake_case = AddedToken(lowercase_, lstrip=lowercase_, rstrip=lowercase_ ) if isinstance(lowercase_, lowercase_ ) else value
snake_case = value
def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding:
snake_case = kwargs.get('is_split_into_words', lowercase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*lowercase_, **lowercase_ )
def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding:
snake_case = kwargs.get('is_split_into_words', lowercase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._encode_plus(*lowercase_, **lowercase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> Tuple[str]:
snake_case = self._tokenizer.model.save(lowercase_, name=lowercase_ )
return tuple(lowercase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_=None ) -> Dict:
snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> List[int]:
snake_case = [self.sep_token_id]
snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCamelCase ( self, lowercase_, lowercase_ = None, lowercase_ = PaddingStrategy.DO_NOT_PAD, lowercase_ = None, lowercase_ = None, ) -> dict:
snake_case = super()._pad(
encoded_inputs=lowercase_, max_length=lowercase_, padding_strategy=lowercase_, pad_to_multiple_of=lowercase_, return_attention_mask=lowercase_, )
# Load from model defaults
if return_attention_mask is None:
snake_case = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
snake_case = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
snake_case = len(encoded_inputs['global_attention_mask'] ) != len(lowercase_ )
if needs_to_be_padded:
snake_case = len(lowercase_ ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
snake_case = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
snake_case = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
| 332 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
lowerCAmelCase_ = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['DPTFeatureExtractor']
lowerCAmelCase_ = ['DPTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'DPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DPTForDepthEstimation',
'DPTForSemanticSegmentation',
'DPTModel',
'DPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 363 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __magic_name__ ( A ) -> Tuple:
snake_case = []
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
F'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
F'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
F'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
F'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def __magic_name__ ( A , A ) -> Optional[int]:
snake_case = []
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def __magic_name__ ( A ) -> List[Any]:
snake_case = []
token.append((F'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') )
return token
def __magic_name__ ( ) -> Dict:
snake_case = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def __magic_name__ ( A , A , A , A ) -> int:
snake_case = 'imagenet-1k-id2label.json'
snake_case = 1_0_0_0
snake_case = 'huggingface/label-files'
snake_case = num_labels
snake_case = json.load(open(cached_download(hf_hub_url(A , A , repo_type='dataset' ) ) , 'r' ) )
snake_case = {int(A ): v for k, v in idalabel.items()}
snake_case = idalabel
snake_case = {v: k for k, v in idalabel.items()}
snake_case = snake_case = CvtConfig(num_labels=A , idalabel=A , labelaid=A )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13":
snake_case = [1, 2, 1_0]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21":
snake_case = [1, 4, 1_6]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
else:
snake_case = [2, 2, 2_0]
snake_case = [3, 1_2, 1_6]
snake_case = [1_9_2, 7_6_8, 1_0_2_4]
snake_case = CvtForImageClassification(A )
snake_case = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
snake_case = image_size
snake_case = torch.load(A , map_location=torch.device('cpu' ) )
snake_case = OrderedDict()
snake_case = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
snake_case = list_of_state_dict + cls_token(A )
snake_case = list_of_state_dict + embeddings(A )
for cnt in range(config.depth[idx] ):
snake_case = list_of_state_dict + attention(A , A )
snake_case = list_of_state_dict + final()
for gg in list_of_state_dict:
print(A )
for i in range(len(A ) ):
snake_case = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(A )
model.save_pretrained(A )
image_processor.save_pretrained(A )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=3_8_4,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
lowerCAmelCase_ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 332 | 0 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
lowerCAmelCase_ = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def __magic_name__ ( A ) -> int:
snake_case = torch.load(__snake_case , map_location='cpu' )
return sd
def __magic_name__ ( A , A , A=rename_keys_prefix ) -> Optional[Any]:
snake_case = OrderedDict()
snake_case = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
snake_case = key
for name_pair in rename_keys_prefix:
snake_case = new_key.replace(name_pair[0] , name_pair[1] )
snake_case = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
snake_case = new_d["cls.predictions.bias"]
return new_d
@torch.no_grad()
def __magic_name__ ( A , A ) -> Optional[int]:
assert (
checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS
), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
snake_case = "pretraining"
if "vcr" in checkpoint_path:
snake_case = {"visual_embedding_dim": 5_1_2}
elif "vqa_advanced" in checkpoint_path:
snake_case = {"visual_embedding_dim": 2_0_4_8}
elif "vqa" in checkpoint_path:
snake_case = {"visual_embedding_dim": 2_0_4_8}
elif "nlvr" in checkpoint_path:
snake_case = {"visual_embedding_dim": 1_0_2_4}
else:
raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' )
else:
if "vcr" in checkpoint_path:
snake_case = {"visual_embedding_dim": 5_1_2}
snake_case = "multichoice"
elif "vqa_advanced" in checkpoint_path:
snake_case = {"visual_embedding_dim": 2_0_4_8}
snake_case = "vqa_advanced"
elif "vqa" in checkpoint_path:
snake_case = {"visual_embedding_dim": 2_0_4_8, "num_labels": 3_1_2_9}
snake_case = "vqa"
elif "nlvr" in checkpoint_path:
snake_case = {
"visual_embedding_dim": 1_0_2_4,
"num_labels": 2,
}
snake_case = "nlvr"
snake_case = VisualBertConfig(**__snake_case )
# Load State Dict
snake_case = load_state_dict(__snake_case )
snake_case = get_new_dict(__snake_case , __snake_case )
if model_type == "pretraining":
snake_case = VisualBertForPreTraining(__snake_case )
elif model_type == "vqa":
snake_case = VisualBertForQuestionAnswering(__snake_case )
elif model_type == "nlvr":
snake_case = VisualBertForVisualReasoning(__snake_case )
elif model_type == "multichoice":
snake_case = VisualBertForMultipleChoice(__snake_case )
model.load_state_dict(__snake_case )
# Save Checkpoints
Path(__snake_case ).mkdir(exist_ok=__snake_case )
model.save_pretrained(__snake_case )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
lowerCAmelCase_ = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 364 |
'''simple docstring'''
from pathlib import Path
import fire
def __magic_name__ ( A , A , A ) -> Union[str, Any]:
snake_case = Path(A )
snake_case = Path(A )
dest_dir.mkdir(exist_ok=A )
for path in src_dir.iterdir():
snake_case = [x.rstrip() for x in list(path.open().readlines() )][:n]
snake_case = dest_dir.joinpath(path.name )
print(A )
dest_path.open('w' ).write('\n'.join(A ) )
if __name__ == "__main__":
fire.Fire(minify)
| 332 | 0 |
'''simple docstring'''
lowerCAmelCase_ = {
"joule": 1.0,
"kilojoule": 1_0_0_0,
"megajoule": 1_0_0_0_0_0_0,
"gigajoule": 1_0_0_0_0_0_0_0_0_0,
"wattsecond": 1.0,
"watthour": 3_6_0_0,
"kilowatthour": 3_6_0_0_0_0_0,
"newtonmeter": 1.0,
"calorie_nutr": 41_86.8,
"kilocalorie_nutr": 4_18_68_00.00,
"electronvolt": 1.6_0217_6634e-19,
"britishthermalunit_it": 10_55.0_55_85,
"footpound": 1.35_58_18,
}
def __magic_name__ ( A , A , A ) -> List[str]:
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
snake_case = (
F'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'''
F'''Valid values are: {", ".join(_a )}'''
)
raise ValueError(_a )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 365 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
lowerCAmelCase_ = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def __magic_name__ ( A , A ) -> Union[str, Any]:
inspect_dataset(A , A )
snake_case = path + '.py'
assert script_name in os.listdir(A )
assert "__pycache__" not in os.listdir(A )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def __magic_name__ ( A , A ) -> int:
inspect_metric(A , A )
snake_case = path + '.py'
assert script_name in os.listdir(A )
assert "__pycache__" not in os.listdir(A )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def __magic_name__ ( A , A , A ) -> List[str]:
snake_case = get_dataset_config_info(A , config_name=A )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def __magic_name__ ( A , A , A ) -> Any:
with pytest.raises(A ):
get_dataset_config_info(A , config_name=A )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def __magic_name__ ( A , A ) -> Dict:
snake_case = get_dataset_config_names(A )
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def __magic_name__ ( A , A , A ) -> List[str]:
snake_case = get_dataset_infos(A )
assert list(infos.keys() ) == expected_configs
snake_case = expected_configs[0]
assert expected_config in infos
snake_case = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def __magic_name__ ( A , A , A ) -> Any:
snake_case = get_dataset_infos(A )
assert expected_config in infos
snake_case = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def __magic_name__ ( A , A , A ) -> int:
with pytest.raises(A ):
get_dataset_split_names(A , config_name=A )
| 332 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCAmelCase_ = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 366 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 332 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
lowerCAmelCase_ = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def __magic_name__ ( A , A , A , A , A , A ) -> Any:
for attribute in key.split('.' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
snake_case = 'lm_head'
snake_case = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if weight_type is not None:
snake_case = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).shape
else:
snake_case = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
snake_case = value
elif weight_type == "weight_g":
snake_case = value
elif weight_type == "weight_v":
snake_case = value
elif weight_type == "bias":
snake_case = value
else:
snake_case = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __magic_name__ ( A , A , A ) -> Any:
snake_case = []
snake_case = fairseq_model.state_dict()
snake_case = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
snake_case = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hf_model.config.feat_extract_norm == 'group' , )
snake_case = True
else:
for key, mapped_key in MAPPING.items():
snake_case = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
snake_case = True
if "*" in mapped_key:
snake_case = name.split(SCREAMING_SNAKE_CASE__ )[0].split('.' )[-2]
snake_case = mapped_key.replace('*' , SCREAMING_SNAKE_CASE__ )
if "weight_g" in name:
snake_case = 'weight_g'
elif "weight_v" in name:
snake_case = 'weight_v'
elif "bias" in name:
snake_case = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case = 'weight'
else:
snake_case = None
set_recursively(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE__ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def __magic_name__ ( A , A , A , A , A ) -> Optional[Any]:
snake_case = full_name.split('conv_layers.' )[-1]
snake_case = name.split('.' )
snake_case = int(items[0] )
snake_case = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
snake_case = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE__ )
@torch.no_grad()
def __magic_name__ ( A , A , A=None , A=None , A=True ) -> List[Any]:
if config_path is not None:
snake_case = UniSpeechConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
else:
snake_case = UniSpeechConfig()
if is_finetuned:
if dict_path:
snake_case = Dictionary.load_from_json(SCREAMING_SNAKE_CASE__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
snake_case = target_dict.pad_index
snake_case = target_dict.bos_index
snake_case = target_dict.eos_index
snake_case = len(target_dict.symbols )
snake_case = os.path.join(SCREAMING_SNAKE_CASE__ , 'vocab.json' )
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(SCREAMING_SNAKE_CASE__ ) )
return
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
snake_case = target_dict.indices
# fairseq has the <pad> and <s> switched
snake_case = 4_2
snake_case = 4_3
with open(SCREAMING_SNAKE_CASE__ , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case = WavaVecaPhonemeCTCTokenizer(
SCREAMING_SNAKE_CASE__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=SCREAMING_SNAKE_CASE__ , )
snake_case = True if config.feat_extract_norm == 'layer' else False
snake_case = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , )
snake_case = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case = UniSpeechForCTC(SCREAMING_SNAKE_CASE__ )
else:
snake_case = UniSpeechForPreTraining(SCREAMING_SNAKE_CASE__ )
if is_finetuned:
snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] ), 'w2v_path': checkpoint_path} )
else:
snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
snake_case = model[0].eval()
recursively_load_weights(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
hf_unispeech.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
lowerCAmelCase_ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 367 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowerCAmelCase_ = False
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowerCamelCase ( self ) -> List[Any]:
return 12
@property
def _lowerCamelCase ( self ) -> Dict:
return 12
@property
def _lowerCamelCase ( self ) -> List[Any]:
return 32
@property
def _lowerCamelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
snake_case = VQModel(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3, )
return model
@property
def _lowerCamelCase ( self ) -> List[Any]:
snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _lowerCamelCase ( self ) -> Tuple:
torch.manual_seed(0 )
snake_case = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
return CLIPTextModel(lowercase_ )
@property
def _lowerCamelCase ( self ) -> str:
torch.manual_seed(0 )
snake_case = 12
snake_case = 12
snake_case = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
snake_case = TransformeraDModel(**lowercase_ )
return model
def _lowerCamelCase ( self ) -> Tuple:
snake_case = 'cpu'
snake_case = self.dummy_vqvae
snake_case = self.dummy_text_encoder
snake_case = self.dummy_tokenizer
snake_case = self.dummy_transformer
snake_case = VQDiffusionScheduler(self.num_embed )
snake_case = LearnedClassifierFreeSamplingEmbeddings(learnable=lowercase_ )
snake_case = VQDiffusionPipeline(
vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, )
snake_case = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case = 'teddy bear playing in the pool'
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' )
snake_case = output.images
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe(
[prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case = 'cpu'
snake_case = self.dummy_vqvae
snake_case = self.dummy_text_encoder
snake_case = self.dummy_tokenizer
snake_case = self.dummy_transformer
snake_case = VQDiffusionScheduler(self.num_embed )
snake_case = LearnedClassifierFreeSamplingEmbeddings(
learnable=lowercase_, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length )
snake_case = VQDiffusionPipeline(
vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, )
snake_case = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case = 'teddy bear playing in the pool'
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' )
snake_case = output.images
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe(
[prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self ) -> str:
snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
snake_case = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
snake_case = pipeline.to(lowercase_ )
pipeline.set_progress_bar_config(disable=lowercase_ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipeline(
'teddy bear playing in the pool', num_images_per_prompt=1, generator=lowercase_, output_type='np', )
snake_case = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 332 | 0 |
'''simple docstring'''
lowerCAmelCase_ = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
lowerCAmelCase_ = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def lowerCamelCase ( A , A , A ) -> list[int]:
snake_case = True
snake_case = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(A , A , A )
order.append(A )
return order
def lowerCamelCase ( A , A , A ) -> list[int]:
snake_case = True
snake_case = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(A , A , A )
return component
def lowerCamelCase ( A ) -> list[list[int]]:
snake_case = len(A ) * [False]
snake_case = {vert: [] for vert in range(len(A ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(A )
snake_case = []
for i, was_visited in enumerate(A ):
if not was_visited:
order += topology_sort(A , A , A )
snake_case = []
snake_case = len(A ) * [False]
for i in range(len(A ) ):
snake_case = order[len(A ) - i - 1]
if not visited[vert]:
snake_case = find_components(A , A , A )
components_list.append(A )
return components_list
| 368 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase ( metaclass=__lowerCAmelCase ):
snake_case_ = ['''note_seq''']
def __init__( self, *lowercase_, **lowercase_ ) -> str:
requires_backends(self, ['note_seq'] )
@classmethod
def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
requires_backends(cls, ['note_seq'] )
@classmethod
def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
requires_backends(cls, ['note_seq'] )
| 332 | 0 |
'''simple docstring'''
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class lowerCamelCase :
def __init__( self, lowercase_, lowercase_ = 13, lowercase_ = 64, lowercase_ = 2, lowercase_ = 3, lowercase_ = 3, lowercase_ = True, lowercase_ = True, lowercase_ = 128, lowercase_=[16, 32, 64, 128], lowercase_ = 7, lowercase_ = 4, lowercase_ = 37, lowercase_ = "gelu", lowercase_ = 0.1, lowercase_ = 0.1, lowercase_ = 10, lowercase_ = 0.02, lowercase_ = 2, lowercase_ = 1, lowercase_ = 128, lowercase_ = [2, 2, 2, 2], lowercase_ = 2, lowercase_ = 2, ) -> Any:
snake_case = parent
snake_case = batch_size
snake_case = image_size
snake_case = patch_size
snake_case = num_channels
snake_case = is_training
snake_case = use_labels
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = intermediate_size
snake_case = hidden_act
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = type_sequence_label_size
snake_case = initializer_range
snake_case = encoder_stride
snake_case = num_attention_outputs
snake_case = embed_dim
snake_case = embed_dim + 1
snake_case = resolution
snake_case = depths
snake_case = hidden_sizes
snake_case = dim
snake_case = mlp_expansion_ratio
def _lowerCamelCase ( self ) -> int:
snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case = None
if self.use_labels:
snake_case = ids_tensor([self.batch_size], self.type_sequence_label_size )
snake_case = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self ) -> Optional[Any]:
return EfficientFormerConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=lowercase_, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, resolution=self.resolution, depths=self.depths, hidden_sizes=self.hidden_sizes, dim=self.dim, mlp_expansion_ratio=self.mlp_expansion_ratio, )
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_ ) -> List[Any]:
snake_case = TFEfficientFormerModel(config=lowercase_ )
snake_case = model(lowercase_, training=lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_ ) -> Optional[int]:
snake_case = self.type_sequence_label_size
snake_case = TFEfficientFormerForImageClassification(lowercase_ )
snake_case = model(lowercase_, labels=lowercase_, training=lowercase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case = 1
snake_case = TFEfficientFormerForImageClassification(lowercase_ )
snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case = model(lowercase_, labels=lowercase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def _lowerCamelCase ( self ) -> Dict:
snake_case = self.prepare_config_and_inputs()
snake_case , snake_case , snake_case = config_and_inputs
snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
snake_case_ = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
snake_case_ = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def _lowerCamelCase ( self ) -> List[Any]:
snake_case = TFEfficientFormerModelTester(self )
snake_case = ConfigTester(
self, config_class=lowercase_, has_text_modality=lowercase_, hidden_size=37 )
def _lowerCamelCase ( self ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason='EfficientFormer does not use inputs_embeds' )
def _lowerCamelCase ( self ) -> Any:
pass
@unittest.skip(reason='EfficientFormer does not support input and output embeddings' )
def _lowerCamelCase ( self ) -> Optional[Any]:
pass
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case = model_class(lowercase_ )
snake_case = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case = [*signature.parameters.keys()]
snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1], lowercase_ )
def _lowerCamelCase ( self ) -> List[Any]:
def check_hidden_states_output(lowercase_, lowercase_, lowercase_ ):
snake_case = model_class(lowercase_ )
snake_case = model(**self._prepare_for_class(lowercase_, lowercase_ ), training=lowercase_ )
snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case = getattr(
self.model_tester, 'expected_num_hidden_layers', self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowercase_ ), lowercase_ )
if hasattr(self.model_tester, 'encoder_seq_length' ):
snake_case = self.model_tester.encoder_seq_length
if hasattr(self.model_tester, 'chunk_length' ) and self.model_tester.chunk_length > 1:
snake_case = seq_length * self.model_tester.chunk_length
else:
snake_case = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ), [seq_length, self.model_tester.hidden_size], )
if config.is_encoder_decoder:
snake_case = outputs.decoder_hidden_states
self.asseretIsInstance(lowercase_, (list, tuple) )
self.assertEqual(len(lowercase_ ), lowercase_ )
snake_case = getattr(self.model_tester, 'seq_length', lowercase_ )
snake_case = getattr(self.model_tester, 'decoder_seq_length', lowercase_ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ), [decoder_seq_length, self.model_tester.hidden_size], )
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case = True
check_hidden_states_output(lowercase_, lowercase_, lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case = True
check_hidden_states_output(lowercase_, lowercase_, lowercase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_=False ) -> Union[str, Any]:
snake_case = super()._prepare_for_class(lowercase_, lowercase_, return_labels=lowercase_ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _lowerCamelCase ( self ) -> str:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
@unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' )
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowercase_ )
def _lowerCamelCase ( self ) -> int:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def _lowerCamelCase ( self ) -> Any:
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case = TFEfficientFormerModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
snake_case = True
snake_case = getattr(self.model_tester, 'seq_length', lowercase_ )
snake_case = getattr(self.model_tester, 'encoder_seq_length', lowercase_ )
snake_case = getattr(self.model_tester, 'key_length', lowercase_ )
snake_case = getattr(self.model_tester, 'chunk_length', lowercase_ )
if chunk_length is not None and hasattr(self.model_tester, 'num_hashes' ):
snake_case = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
snake_case = True
snake_case = False
snake_case = True
snake_case = model_class(lowercase_ )
snake_case = model(**self._prepare_for_class(lowercase_, lowercase_ ), training=lowercase_ )
snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowercase_ ), self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case = True
snake_case = model_class(lowercase_ )
snake_case = model(**self._prepare_for_class(lowercase_, lowercase_ ), training=lowercase_ )
snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowercase_ ), self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], )
def _lowerCamelCase ( self ) -> str:
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
snake_case = model_class(lowercase_ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
snake_case = {
key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=lowercase_ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
snake_case = model(lowercase_ )
self.assertTrue(outputs_dict is not None )
def __magic_name__ ( ) -> Union[str, Any]:
snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class lowerCamelCase ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self ) -> Optional[Any]:
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' )
if is_vision_available()
else None
)
@slow
def _lowerCamelCase ( self ) -> Optional[int]:
snake_case = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' )
snake_case = self.default_image_processor
snake_case = prepare_img()
snake_case = image_processor(images=lowercase_, return_tensors='tf' )
# forward pass
snake_case = model(**lowercase_, training=lowercase_ )
# verify the logits
snake_case = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape, lowercase_ )
snake_case = tf.constant([-0.0_555, 0.4_825, -0.0_852] )
self.assertTrue(np.allclose(outputs.logits[0, :3], lowercase_, atol=1E-4 ) )
@slow
def _lowerCamelCase ( self ) -> Optional[int]:
snake_case = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'snap-research/efficientformer-l1-300' )
snake_case = self.default_image_processor
snake_case = prepare_img()
snake_case = image_processor(images=lowercase_, return_tensors='tf' )
# forward pass
snake_case = model(**lowercase_, training=lowercase_ )
# verify the logits
snake_case = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape, lowercase_ )
snake_case = tf.constant([-0.1_312, 0.4_353, -1.0_499] )
self.assertTrue(np.allclose(outputs.logits[0, :3], lowercase_, atol=1E-4 ) )
| 369 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase ( __lowerCAmelCase ):
def __init__( self, *lowercase_, **lowercase_ ) -> None:
warnings.warn(
'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DPTImageProcessor instead.', lowercase_, )
super().__init__(*lowercase_, **lowercase_ )
| 332 | 0 |
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = "Hello world! cécé herlolip"
def __magic_name__ ( A , A , A ) -> Union[str, Any]:
snake_case = FairseqRobertaModel.from_pretrained(_lowerCamelCase )
roberta.eval() # disable dropout
snake_case = roberta.model.encoder.sentence_encoder
snake_case = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
if classification_head:
snake_case = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
print('Our RoBERTa config:' , _lowerCamelCase )
snake_case = XLMRobertaXLForSequenceClassification(_lowerCamelCase ) if classification_head else XLMRobertaXLForMaskedLM(_lowerCamelCase )
model.eval()
# Now let's copy all the weights.
# Embeddings
snake_case = roberta_sent_encoder.embed_tokens.weight
snake_case = roberta_sent_encoder.embed_positions.weight
snake_case = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
snake_case = roberta_sent_encoder.layer_norm.weight
snake_case = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
snake_case = model.roberta.encoder.layer[i]
snake_case = roberta_sent_encoder.layers[i]
snake_case = layer.attention
snake_case = roberta_layer.self_attn_layer_norm.weight
snake_case = roberta_layer.self_attn_layer_norm.bias
# self attention
snake_case = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
snake_case = roberta_layer.self_attn.q_proj.weight
snake_case = roberta_layer.self_attn.q_proj.bias
snake_case = roberta_layer.self_attn.k_proj.weight
snake_case = roberta_layer.self_attn.k_proj.bias
snake_case = roberta_layer.self_attn.v_proj.weight
snake_case = roberta_layer.self_attn.v_proj.bias
# self-attention output
snake_case = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
snake_case = roberta_layer.self_attn.out_proj.weight
snake_case = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
snake_case = roberta_layer.final_layer_norm.weight
snake_case = roberta_layer.final_layer_norm.bias
# intermediate
snake_case = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
snake_case = roberta_layer.fca.weight
snake_case = roberta_layer.fca.bias
# output
snake_case = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
snake_case = roberta_layer.fca.weight
snake_case = roberta_layer.fca.bias
# end of layer
if classification_head:
snake_case = roberta.model.classification_heads["mnli"].dense.weight
snake_case = roberta.model.classification_heads["mnli"].dense.bias
snake_case = roberta.model.classification_heads["mnli"].out_proj.weight
snake_case = roberta.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
snake_case = roberta.model.encoder.lm_head.dense.weight
snake_case = roberta.model.encoder.lm_head.dense.bias
snake_case = roberta.model.encoder.lm_head.layer_norm.weight
snake_case = roberta.model.encoder.lm_head.layer_norm.bias
snake_case = roberta.model.encoder.lm_head.weight
snake_case = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
snake_case = roberta.encode(_lowerCamelCase ).unsqueeze(0 ) # batch of size 1
snake_case = model(_lowerCamelCase )[0]
if classification_head:
snake_case = roberta.model.classification_heads["mnli"](roberta.extract_features(_lowerCamelCase ) )
else:
snake_case = roberta.model(_lowerCamelCase )[0]
print(our_output.shape , their_output.shape )
snake_case = torch.max(torch.abs(our_output - their_output ) ).item()
print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
snake_case = torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1E-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
pathlib.Path(_lowerCamelCase ).mkdir(parents=_lowerCamelCase , exist_ok=_lowerCamelCase )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
lowerCAmelCase_ = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 370 |
'''simple docstring'''
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
lowerCAmelCase_ = pd.read_csv(
"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
"position_salaries.csv"
)
lowerCAmelCase_ = dataset.iloc[:, 1:2].values
lowerCAmelCase_ = dataset.iloc[:, 2].values
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = train_test_split(X, y, test_size=0.2, random_state=0)
lowerCAmelCase_ = PolynomialFeatures(degree=4)
lowerCAmelCase_ = poly_reg.fit_transform(X)
lowerCAmelCase_ = LinearRegression()
pol_reg.fit(X_poly, y)
def __magic_name__ ( ) -> Any:
plt.scatter(A , A , color='red' )
plt.plot(A , pol_reg.predict(poly_reg.fit_transform(A ) ) , color='blue' )
plt.title('Truth or Bluff (Linear Regression)' )
plt.xlabel('Position level' )
plt.ylabel('Salary' )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 332 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase_ = {
"configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
"tokenization_biogpt": ["BioGptTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BioGptForCausalLM",
"BioGptForTokenClassification",
"BioGptForSequenceClassification",
"BioGptModel",
"BioGptPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 371 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = ''''''
snake_case_ = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
snake_case_ = None # compression type in fsspec. ex: "gzip"
snake_case_ = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self, lowercase_ = "", lowercase_ = None, lowercase_ = None, **lowercase_ ) -> str:
super().__init__(self, **lowercase_ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
snake_case = fsspec.open(
lowercase_, mode='rb', protocol=lowercase_, compression=self.compression, client_kwargs={
'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459
'trust_env': True, # Enable reading proxy env variables.
**(target_options or {}).pop('client_kwargs', {} ), # To avoid issues if it was already passed.
}, **(target_options or {}), )
snake_case = os.path.basename(self.file.path.split('::' )[0] )
snake_case = (
self.compressed_name[: self.compressed_name.rindex('.' )]
if '.' in self.compressed_name
else self.compressed_name
)
snake_case = None
@classmethod
def _lowerCamelCase ( cls, lowercase_ ) -> Any:
# compressed file paths are always relative to the archive root
return super()._strip_protocol(lowercase_ ).lstrip('/' )
def _lowerCamelCase ( self ) -> Optional[Any]:
if self.dir_cache is None:
snake_case = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name}
snake_case = {f['name']: f}
def _lowerCamelCase ( self, lowercase_ ) -> str:
return self.file.open().read()
def _lowerCamelCase ( self, lowercase_, lowercase_ = "rb", lowercase_=None, lowercase_=True, lowercase_=None, **lowercase_, ) -> Any:
snake_case = self._strip_protocol(lowercase_ )
if mode != "rb":
raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
return self.file.open()
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''bz2'''
snake_case_ = '''bz2'''
snake_case_ = '''.bz2'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''gzip'''
snake_case_ = '''gzip'''
snake_case_ = '''.gz'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''lz4'''
snake_case_ = '''lz4'''
snake_case_ = '''.lz4'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''xz'''
snake_case_ = '''xz'''
snake_case_ = '''.xz'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''zstd'''
snake_case_ = '''zstd'''
snake_case_ = '''.zst'''
def __init__( self, lowercase_, lowercase_ = "rb", lowercase_ = None, lowercase_ = None, lowercase_ = DEFAULT_BLOCK_SIZE, **lowercase_, ) -> Union[str, Any]:
super().__init__(
fo=lowercase_, mode=lowercase_, target_protocol=lowercase_, target_options=lowercase_, block_size=lowercase_, **lowercase_, )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
snake_case = self.file.__enter__
class lowerCamelCase :
def __init__( self, lowercase_ ) -> List[Any]:
snake_case = file_
def __enter__( self ) -> Dict:
self._file.__enter__()
return self
def __exit__( self, *lowercase_, **lowercase_ ) -> Dict:
self._file.__exit__(*lowercase_, **lowercase_ )
def __iter__( self ) -> List[str]:
return iter(self._file )
def _lowerCamelCase ( self ) -> List[str]:
return next(self._file )
def __getattr__( self, lowercase_ ) -> List[Any]:
return getattr(self._file, lowercase_ )
def fixed_enter(*lowercase_, **lowercase_ ):
return WrappedFile(_enter(*lowercase_, **lowercase_ ) )
snake_case = fixed_enter
| 332 | 0 |
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
lowerCAmelCase_ = True
except (ImportError, AttributeError):
lowerCAmelCase_ = object
def __magic_name__ ( *A , **A ) -> List[Any]:
pass
lowerCAmelCase_ = False
lowerCAmelCase_ = logging.get_logger("transformers-cli/serving")
def __magic_name__ ( A ) -> List[str]:
snake_case = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
return ServeCommand(lowerCamelCase__ , args.host , args.port , args.workers )
class lowerCamelCase ( a_ ):
snake_case_ = 42
class lowerCamelCase ( a_ ):
snake_case_ = 42
snake_case_ = 42
class lowerCamelCase ( a_ ):
snake_case_ = 42
class lowerCamelCase ( a_ ):
snake_case_ = 42
class lowerCamelCase ( a_ ):
@staticmethod
def _lowerCamelCase ( lowercase_ ) -> List[str]:
snake_case = parser.add_parser(
'serve', help='CLI tool to run inference requests through REST and GraphQL endpoints.' )
serve_parser.add_argument(
'--task', type=lowercase_, choices=get_supported_tasks(), help='The task to run the pipeline on', )
serve_parser.add_argument('--host', type=lowercase_, default='localhost', help='Interface the server will listen on.' )
serve_parser.add_argument('--port', type=lowercase_, default=8888, help='Port the serving will listen to.' )
serve_parser.add_argument('--workers', type=lowercase_, default=1, help='Number of http workers' )
serve_parser.add_argument('--model', type=lowercase_, help='Model\'s name or path to stored model.' )
serve_parser.add_argument('--config', type=lowercase_, help='Model\'s config name or path to stored model.' )
serve_parser.add_argument('--tokenizer', type=lowercase_, help='Tokenizer name to use.' )
serve_parser.add_argument(
'--device', type=lowercase_, default=-1, help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)', )
serve_parser.set_defaults(func=lowercase_ )
def __init__( self, lowercase_, lowercase_, lowercase_, lowercase_ ) -> Tuple:
snake_case = pipeline
snake_case = host
snake_case = port
snake_case = workers
if not _serve_dependencies_installed:
raise RuntimeError(
'Using serve command requires FastAPI and uvicorn. '
'Please install transformers with [serving]: pip install \"transformers[serving]\".'
'Or install FastAPI and uvicorn separately.' )
else:
logger.info(F'''Serving model over {host}:{port}''' )
snake_case = FastAPI(
routes=[
APIRoute(
'/', self.model_info, response_model=lowercase_, response_class=lowercase_, methods=['GET'], ),
APIRoute(
'/tokenize', self.tokenize, response_model=lowercase_, response_class=lowercase_, methods=['POST'], ),
APIRoute(
'/detokenize', self.detokenize, response_model=lowercase_, response_class=lowercase_, methods=['POST'], ),
APIRoute(
'/forward', self.forward, response_model=lowercase_, response_class=lowercase_, methods=['POST'], ),
], timeout=600, )
def _lowerCamelCase ( self ) -> Union[str, Any]:
run(self._app, host=self.host, port=self.port, workers=self.workers )
def _lowerCamelCase ( self ) -> Optional[Any]:
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def _lowerCamelCase ( self, lowercase_ = Body(lowercase_, embed=lowercase_ ), lowercase_ = Body(lowercase_, embed=lowercase_ ) ) -> Dict:
try:
snake_case = self._pipeline.tokenizer.tokenize(lowercase_ )
if return_ids:
snake_case = self._pipeline.tokenizer.convert_tokens_to_ids(lowercase_ )
return ServeTokenizeResult(tokens=lowercase_, tokens_ids=lowercase_ )
else:
return ServeTokenizeResult(tokens=lowercase_ )
except Exception as e:
raise HTTPException(status_code=500, detail={'model': '', 'error': str(lowercase_ )} )
def _lowerCamelCase ( self, lowercase_ = Body(lowercase_, embed=lowercase_ ), lowercase_ = Body(lowercase_, embed=lowercase_ ), lowercase_ = Body(lowercase_, embed=lowercase_ ), ) -> Optional[int]:
try:
snake_case = self._pipeline.tokenizer.decode(lowercase_, lowercase_, lowercase_ )
return ServeDeTokenizeResult(model='', text=lowercase_ )
except Exception as e:
raise HTTPException(status_code=500, detail={'model': '', 'error': str(lowercase_ )} )
async def _lowerCamelCase ( self, lowercase_=Body(lowercase_, embed=lowercase_ ) ) -> str:
if len(lowercase_ ) == 0:
return ServeForwardResult(output=[], attention=[] )
try:
# Forward through the model
snake_case = self._pipeline(lowercase_ )
return ServeForwardResult(output=lowercase_ )
except Exception as e:
raise HTTPException(500, {'error': str(lowercase_ )} )
| 350 |
'''simple docstring'''
from __future__ import annotations
def __magic_name__ ( A , A , A ) -> int | float:
if len(A ) == 0:
raise ValueError('find_max() arg is an empty sequence' )
if (
left >= len(A )
or left < -len(A )
or right >= len(A )
or right < -len(A )
):
raise IndexError('list index out of range' )
if left == right:
return nums[left]
snake_case = (left + right) >> 1 # the middle
snake_case = find_max(A , A , A ) # find max in range[left, mid]
snake_case = find_max(A , mid + 1 , A ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 332 | 0 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def __magic_name__ ( A ) -> int:
snake_case = FileLock(str(tmpdir / 'foo.lock' ) )
snake_case = FileLock(str(tmpdir / 'foo.lock' ) )
snake_case = 0.01
with locka.acquire():
with pytest.raises(_SCREAMING_SNAKE_CASE ):
snake_case = time.time()
locka.acquire(_SCREAMING_SNAKE_CASE )
assert time.time() - _start > timeout
def __magic_name__ ( A ) -> Tuple:
snake_case = "a" * 1_0_0_0 + ".lock"
snake_case = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith('.lock' )
assert not locka._lock_file.endswith(_SCREAMING_SNAKE_CASE )
assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5
snake_case = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(_SCREAMING_SNAKE_CASE ):
locka.acquire(0 )
| 351 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = 42
class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase ):
@register_to_config
def __init__( self, lowercase_ = 3, lowercase_ = 3, lowercase_ = ("DownEncoderBlock2D",), lowercase_ = ("UpDecoderBlock2D",), lowercase_ = (64,), lowercase_ = 1, lowercase_ = "silu", lowercase_ = 3, lowercase_ = 32, lowercase_ = 256, lowercase_ = 32, lowercase_ = None, lowercase_ = 0.18_215, lowercase_ = "group", ) -> str:
super().__init__()
# pass init params to Encoder
snake_case = Encoder(
in_channels=lowercase_, out_channels=lowercase_, down_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, double_z=lowercase_, )
snake_case = vq_embed_dim if vq_embed_dim is not None else latent_channels
snake_case = nn.Convad(lowercase_, lowercase_, 1 )
snake_case = VectorQuantizer(lowercase_, lowercase_, beta=0.25, remap=lowercase_, sane_index_shape=lowercase_ )
snake_case = nn.Convad(lowercase_, lowercase_, 1 )
# pass init params to Decoder
snake_case = Decoder(
in_channels=lowercase_, out_channels=lowercase_, up_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, norm_type=lowercase_, )
@apply_forward_hook
def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> VQEncoderOutput:
snake_case = self.encoder(lowercase_ )
snake_case = self.quant_conv(lowercase_ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowercase_ )
@apply_forward_hook
def _lowerCamelCase ( self, lowercase_, lowercase_ = False, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
# also go through quantization layer
if not force_not_quantize:
snake_case , snake_case , snake_case = self.quantize(lowercase_ )
else:
snake_case = h
snake_case = self.post_quant_conv(lowercase_ )
snake_case = self.decoder(lowercase_, quant if self.config.norm_type == 'spatial' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
snake_case = sample
snake_case = self.encode(lowercase_ ).latents
snake_case = self.decode(lowercase_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase_ )
| 332 | 0 |
'''simple docstring'''
import argparse
import os
import re
lowerCAmelCase_ = 'src/diffusers'
# Pattern that looks at the indentation in a line.
lowerCAmelCase_ = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
lowerCAmelCase_ = re.compile(r"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowerCAmelCase_ = re.compile(r"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
lowerCAmelCase_ = re.compile(r"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowerCAmelCase_ = re.compile(r"\[([^\]]+)\]")
def __magic_name__ ( A ) -> List[Any]:
snake_case = _re_indent.search(UpperCamelCase__ )
return "" if search is None else search.groups()[0]
def __magic_name__ ( A , A="" , A=None , A=None ) -> Dict:
snake_case = 0
snake_case = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(UpperCamelCase__ ):
index += 1
snake_case = ['''\n'''.join(lines[:index] )]
else:
snake_case = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
snake_case = [lines[index]]
index += 1
while index < len(UpperCamelCase__ ) and (end_prompt is None or not lines[index].startswith(UpperCamelCase__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(UpperCamelCase__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(UpperCamelCase__ ) )
if index < len(UpperCamelCase__ ) - 1:
snake_case = [lines[index + 1]]
index += 1
else:
snake_case = []
else:
blocks.append('\n'.join(UpperCamelCase__ ) )
snake_case = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(UpperCamelCase__ ) > 0:
blocks.append('\n'.join(UpperCamelCase__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(UpperCamelCase__ ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def __magic_name__ ( A ) -> Optional[int]:
def _inner(A ):
return key(UpperCamelCase__ ).lower().replace('_' , '' )
return _inner
def __magic_name__ ( A , A=None ) -> Optional[int]:
# If no key is provided, we use a noop.
def noop(A ):
return x
if key is None:
snake_case = noop
# Constants are all uppercase, they go first.
snake_case = [obj for obj in objects if key(UpperCamelCase__ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
snake_case = [obj for obj in objects if key(UpperCamelCase__ )[0].isupper() and not key(UpperCamelCase__ ).isupper()]
# Functions begin with a lowercase, they go last.
snake_case = [obj for obj in objects if not key(UpperCamelCase__ )[0].isupper()]
snake_case = ignore_underscore(UpperCamelCase__ )
return sorted(UpperCamelCase__ , key=UpperCamelCase__ ) + sorted(UpperCamelCase__ , key=UpperCamelCase__ ) + sorted(UpperCamelCase__ , key=UpperCamelCase__ )
def __magic_name__ ( A ) -> List[str]:
# This inner function sort imports between [ ].
def _replace(A ):
snake_case = match.groups()[0]
if "," not in imports:
return F'''[{imports}]'''
snake_case = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
snake_case = keys[:-1]
return "[" + ", ".join([F'''"{k}"''' for k in sort_objects(UpperCamelCase__ )] ) + "]"
snake_case = import_statement.split('\n' )
if len(UpperCamelCase__ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
snake_case = 2 if lines[1].strip() == '''[''' else 1
snake_case = [(i, _re_strip_line.search(UpperCamelCase__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
snake_case = sort_objects(UpperCamelCase__ , key=lambda A : x[1] )
snake_case = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(UpperCamelCase__ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
snake_case = _re_bracket_content.sub(_replace , lines[1] )
else:
snake_case = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
snake_case = keys[:-1]
snake_case = get_indent(lines[1] ) + ''', '''.join([F'''"{k}"''' for k in sort_objects(UpperCamelCase__ )] )
return "\n".join(UpperCamelCase__ )
else:
# Finally we have to deal with imports fitting on one line
snake_case = _re_bracket_content.sub(_replace , UpperCamelCase__ )
return import_statement
def __magic_name__ ( A , A=True ) -> int:
with open(UpperCamelCase__ , 'r' ) as f:
snake_case = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
snake_case = split_code_in_indented_blocks(
UpperCamelCase__ , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(UpperCamelCase__ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
snake_case = main_blocks[block_idx]
snake_case = block.split('\n' )
# Get to the start of the imports.
snake_case = 0
while line_idx < len(UpperCamelCase__ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
snake_case = len(UpperCamelCase__ )
else:
line_idx += 1
if line_idx >= len(UpperCamelCase__ ):
continue
# Ignore beginning and last line: they don't contain anything.
snake_case = '''\n'''.join(block_lines[line_idx:-1] )
snake_case = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
snake_case = split_code_in_indented_blocks(UpperCamelCase__ , indent_level=UpperCamelCase__ )
# We have two categories of import key: list or _import_structure[key].append/extend
snake_case = _re_direct_key if '''_import_structure''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
snake_case = [(pattern.search(UpperCamelCase__ ).groups()[0] if pattern.search(UpperCamelCase__ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
snake_case = [(i, key) for i, key in enumerate(UpperCamelCase__ ) if key is not None]
snake_case = [x[0] for x in sorted(UpperCamelCase__ , key=lambda A : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
snake_case = 0
snake_case = []
for i in range(len(UpperCamelCase__ ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
snake_case = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(UpperCamelCase__ )
count += 1
# And we put our main block back together with its first and last line.
snake_case = '''\n'''.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(UpperCamelCase__ ):
if check_only:
return True
else:
print(F'''Overwriting {file}.''' )
with open(UpperCamelCase__ , 'w' ) as f:
f.write('\n'.join(UpperCamelCase__ ) )
def __magic_name__ ( A=True ) -> str:
snake_case = []
for root, _, files in os.walk(UpperCamelCase__ ):
if "__init__.py" in files:
snake_case = sort_imports(os.path.join(UpperCamelCase__ , '__init__.py' ) , check_only=UpperCamelCase__ )
if result:
snake_case = [os.path.join(UpperCamelCase__ , '__init__.py' )]
if len(UpperCamelCase__ ) > 0:
raise ValueError(F'''Would overwrite {len(UpperCamelCase__ )} files, run `make style`.''' )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
lowerCAmelCase_ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 352 |
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def __magic_name__ ( A = 2_0_0_0_0_0_0 ) -> int:
snake_case = [0]
snake_case = 42
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
snake_case = 0
# the area corresponding to the grid that gives the product closest to target
snake_case = 0
# an estimate of b, using the quadratic formula
snake_case = 42
# the largest integer less than b_estimate
snake_case = 42
# the largest integer less than b_estimate
snake_case = 42
# the triangle number corresponding to b_floor
snake_case = 42
# the triangle number corresponding to b_ceil
snake_case = 42
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
snake_case = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
snake_case = floor(A )
snake_case = ceil(A )
snake_case = triangle_numbers[b_floor]
snake_case = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
snake_case = triangle_b_first_guess * triangle_a
snake_case = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
snake_case = triangle_b_second_guess * triangle_a
snake_case = idx_a * b_ceil
return area
if __name__ == "__main__":
print(f"{solution() = }")
| 332 | 0 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class lowerCamelCase ( _lowerCamelCase ):
@require_torch
def _lowerCamelCase ( self ) -> Dict:
snake_case = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
snake_case = '\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n '
snake_case = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn\'t access internet\")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
snake_case = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(lowercase_ )
BertModel.from_pretrained(lowercase_ )
BertTokenizer.from_pretrained(lowercase_ )
pipeline(task='fill-mask', model=lowercase_ )
# baseline - just load from_pretrained with normal network
snake_case = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
snake_case = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
snake_case = '1'
snake_case = subprocess.run(lowercase_, env=lowercase_, check=lowercase_, capture_output=lowercase_ )
self.assertEqual(result.returncode, 0, result.stderr )
self.assertIn('success', result.stdout.decode() )
@require_torch
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
snake_case = '\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n '
snake_case = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
snake_case = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(lowercase_ )
BertModel.from_pretrained(lowercase_ )
BertTokenizer.from_pretrained(lowercase_ )
pipeline(task='fill-mask', model=lowercase_ )
# baseline - just load from_pretrained with normal network
snake_case = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
snake_case = self.get_env()
snake_case = subprocess.run(lowercase_, env=lowercase_, check=lowercase_, capture_output=lowercase_ )
self.assertEqual(result.returncode, 0, result.stderr )
self.assertIn('success', result.stdout.decode() )
@require_torch
def _lowerCamelCase ( self ) -> List[str]:
snake_case = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
snake_case = '\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n '
snake_case = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
snake_case = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
snake_case = self.get_env()
snake_case = subprocess.run(lowercase_, env=lowercase_, check=lowercase_, capture_output=lowercase_ )
self.assertEqual(result.returncode, 0, result.stderr )
self.assertIn('success', result.stdout.decode() )
# next emulate no network
snake_case = [sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
snake_case = '1'
snake_case = subprocess.run(lowercase_, env=lowercase_, check=lowercase_, capture_output=lowercase_ )
self.assertEqual(result.returncode, 0, result.stderr )
self.assertIn('success', result.stdout.decode() )
@require_torch
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case = '\nfrom transformers import pipeline\n '
snake_case = '\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n '
snake_case = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n '
snake_case = self.get_env()
snake_case = '1'
snake_case = [sys.executable, '-c', '\n'.join([load, mock, run] )]
snake_case = subprocess.run(lowercase_, env=lowercase_, check=lowercase_, capture_output=lowercase_ )
self.assertEqual(result.returncode, 1, result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode', result.stderr.decode().replace('\n', '' ), )
@require_torch
def _lowerCamelCase ( self ) -> Tuple:
snake_case = '\nfrom transformers import AutoModel\n '
snake_case = '\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n '
# baseline - just load from_pretrained with normal network
snake_case = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
snake_case = self.get_env()
snake_case = subprocess.run(lowercase_, env=lowercase_, check=lowercase_, capture_output=lowercase_ )
self.assertEqual(result.returncode, 0, result.stderr )
self.assertIn('success', result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
snake_case = '1'
snake_case = subprocess.run(lowercase_, env=lowercase_, check=lowercase_, capture_output=lowercase_ )
self.assertEqual(result.returncode, 0, result.stderr )
self.assertIn('success', result.stdout.decode() )
| 353 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 332 | 0 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowerCamelCase ( lowerCamelCase__ ):
def __init__( self, lowercase_, lowercase_ = None, lowercase_ = None, lowercase_ = True, lowercase_ = None, lowercase_ = False, lowercase_ = None, lowercase_ = True, lowercase_ = "arrow", **lowercase_, ) -> str:
super().__init__(
split=lowercase_, features=lowercase_, cache_dir=lowercase_, keep_in_memory=lowercase_, streaming=lowercase_, **lowercase_, )
snake_case = load_from_cache_file
snake_case = file_format
snake_case = Spark(
df=lowercase_, features=lowercase_, cache_dir=lowercase_, working_dir=lowercase_, **lowercase_, )
def _lowerCamelCase ( self ) -> Optional[int]:
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
snake_case = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=lowercase_, file_format=self._file_format, )
return self.builder.as_dataset(split=self.split )
| 354 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
lowerCAmelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCamelCase :
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(__lowerCAmelCase )} )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} )
snake_case_ = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
snake_case_ = field(
default=128 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , )
snake_case_ = field(
default=64 , metadata={
'''help''': (
'''The maximum number of tokens for the question. Questions longer than this will '''
'''be truncated to this length.'''
)
} , )
snake_case_ = field(
default=30 , metadata={
'''help''': (
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
)
} , )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} )
snake_case_ = field(
default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
snake_case_ = field(
default=20 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
snake_case_ = field(
default=0 , metadata={
'''help''': (
'''language id of input for language-specific xlm models (see'''
''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'''
)
} , )
snake_case_ = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} )
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''train'''
snake_case_ = '''dev'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
def __init__( self, lowercase_, lowercase_, lowercase_ = None, lowercase_ = Split.train, lowercase_ = False, lowercase_ = None, lowercase_ = "pt", ) -> int:
snake_case = args
snake_case = is_language_sensitive
snake_case = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(lowercase_, lowercase_ ):
try:
snake_case = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
snake_case = mode
# Load data features from cache or dataset file
snake_case = 'v2' if args.version_2_with_negative else 'v1'
snake_case = os.path.join(
cache_dir if cache_dir is not None else args.data_dir, F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
snake_case = cached_features_file + '.lock'
with FileLock(lowercase_ ):
if os.path.exists(lowercase_ ) and not args.overwrite_cache:
snake_case = time.time()
snake_case = torch.load(lowercase_ )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
snake_case = self.old_features['features']
snake_case = self.old_features.get('dataset', lowercase_ )
snake_case = self.old_features.get('examples', lowercase_ )
logger.info(
F'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
' future run' )
else:
if mode == Split.dev:
snake_case = self.processor.get_dev_examples(args.data_dir )
else:
snake_case = self.processor.get_train_examples(args.data_dir )
snake_case , snake_case = squad_convert_examples_to_features(
examples=self.examples, tokenizer=lowercase_, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowercase_, )
snake_case = time.time()
torch.save(
{'features': self.features, 'dataset': self.dataset, 'examples': self.examples}, lowercase_, )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ) -> Tuple:
return len(self.features )
def __getitem__( self, lowercase_ ) -> Dict[str, torch.Tensor]:
# Convert to Tensors and build dataset
snake_case = self.features[i]
snake_case = torch.tensor(feature.input_ids, dtype=torch.long )
snake_case = torch.tensor(feature.attention_mask, dtype=torch.long )
snake_case = torch.tensor(feature.token_type_ids, dtype=torch.long )
snake_case = torch.tensor(feature.cls_index, dtype=torch.long )
snake_case = torch.tensor(feature.p_mask, dtype=torch.float )
snake_case = torch.tensor(feature.is_impossible, dtype=torch.float )
snake_case = {
'input_ids': input_ids,
'attention_mask': attention_mask,
'token_type_ids': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'cls_index': cls_index, 'p_mask': p_mask} )
if self.args.version_2_with_negative:
inputs.update({'is_impossible': is_impossible} )
if self.is_language_sensitive:
inputs.update({'langs': (torch.ones(input_ids.shape, dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
snake_case = torch.tensor(feature.start_position, dtype=torch.long )
snake_case = torch.tensor(feature.end_position, dtype=torch.long )
inputs.update({'start_positions': start_positions, 'end_positions': end_positions} )
return inputs
| 332 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
lowerCAmelCase_ = None
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
lowerCAmelCase_ = {
"vocab_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
},
"tokenizer_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
},
}
lowerCAmelCase_ = {
"google/fnet-base": 5_1_2,
"google/fnet-large": 5_1_2,
}
lowerCAmelCase_ = "▁"
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ['''input_ids''', '''token_type_ids''']
snake_case_ = FNetTokenizer
def __init__( self, lowercase_=None, lowercase_=None, lowercase_=False, lowercase_=True, lowercase_=True, lowercase_="<unk>", lowercase_="[SEP]", lowercase_="<pad>", lowercase_="[CLS]", lowercase_="[MASK]", **lowercase_, ) -> List[Any]:
snake_case = (
AddedToken(lowerCamelCase_, lstrip=lowerCamelCase_, rstrip=lowerCamelCase_, normalized=lowerCamelCase_ )
if isinstance(lowerCamelCase_, lowerCamelCase_ )
else mask_token
)
super().__init__(
lowerCamelCase_, tokenizer_file=lowerCamelCase_, do_lower_case=lowerCamelCase_, remove_space=lowerCamelCase_, keep_accents=lowerCamelCase_, unk_token=lowerCamelCase_, sep_token=lowerCamelCase_, pad_token=lowerCamelCase_, cls_token=lowerCamelCase_, mask_token=lowerCamelCase_, **lowerCamelCase_, )
snake_case = do_lower_case
snake_case = remove_space
snake_case = keep_accents
snake_case = vocab_file
snake_case = False if not self.vocab_file else True
def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> Any:
snake_case = [self.sep_token_id]
snake_case = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> Union[str, Any]:
snake_case = [self.sep_token_id]
snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> List[str]:
if not os.path.isdir(lowerCamelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case = os.path.join(
lowerCamelCase_, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ):
copyfile(self.vocab_file, lowerCamelCase_ )
return (out_vocab_file,)
| 355 |
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def __magic_name__ ( A , A , A ) -> Any:
# Initialise PyTorch model
snake_case = BertConfig.from_json_file(A )
print(F'''Building PyTorch model from configuration: {config}''' )
snake_case = BertForPreTraining(A )
# Load weights from tf checkpoint
load_tf_weights_in_bert(A , A , A )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , A )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCAmelCase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 332 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class lowerCamelCase :
def __init__( self, lowercase_, lowercase_=13, lowercase_=7, lowercase_=True, lowercase_=True, lowercase_=True, lowercase_=99, lowercase_=32, lowercase_=5, lowercase_=4, lowercase_=37, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=16, lowercase_=2, lowercase_=0.02, lowercase_=3, lowercase_=4, lowercase_=None, ) -> Any:
snake_case = parent
snake_case = batch_size
snake_case = seq_length
snake_case = is_training
snake_case = use_token_type_ids
snake_case = use_labels
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = intermediate_size
snake_case = hidden_act
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = type_vocab_size
snake_case = type_sequence_label_size
snake_case = initializer_range
snake_case = num_labels
snake_case = num_choices
snake_case = scope
snake_case = self.vocab_size - 1
def _lowerCamelCase ( self ) -> List[str]:
snake_case = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
snake_case = None
if self.use_token_type_ids:
snake_case = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
snake_case = None
snake_case = None
snake_case = None
if self.use_labels:
snake_case = ids_tensor([self.batch_size], self.type_sequence_label_size )
snake_case = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
snake_case = ids_tensor([self.batch_size], self.num_choices )
snake_case = OpenAIGPTConfig(
vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
snake_case = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, *lowercase_ ) -> List[str]:
snake_case = OpenAIGPTModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case = model(lowercase_, token_type_ids=lowercase_, head_mask=lowercase_ )
snake_case = model(lowercase_, token_type_ids=lowercase_ )
snake_case = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, *lowercase_ ) -> Optional[int]:
snake_case = OpenAIGPTLMHeadModel(lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case = model(lowercase_, token_type_ids=lowercase_, labels=lowercase_ )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, *lowercase_ ) -> str:
snake_case = OpenAIGPTDoubleHeadsModel(lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case = model(lowercase_, token_type_ids=lowercase_, labels=lowercase_ )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, *lowercase_ ) -> List[str]:
snake_case = self.num_labels
snake_case = OpenAIGPTForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case = ids_tensor([self.batch_size], self.type_sequence_label_size )
snake_case = model(lowercase_, token_type_ids=lowercase_, labels=lowercase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case = self.prepare_config_and_inputs()
(
(
snake_case
) , (
snake_case
) , (
snake_case
) , (
snake_case
) , (
snake_case
) , (
snake_case
) , (
snake_case
) ,
) = config_and_inputs
snake_case = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_torch
class lowerCamelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
snake_case_ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
snake_case_ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
snake_case_ = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_ ) -> List[str]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_=False ) -> Tuple:
snake_case = super()._prepare_for_class(lowercase_, lowercase_, return_labels=lowercase_ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
snake_case = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=lowercase_, )
snake_case = inputs_dict['labels']
snake_case = inputs_dict['labels']
snake_case = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=lowercase_, )
snake_case = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowercase_ )
return inputs_dict
def _lowerCamelCase ( self ) -> int:
snake_case = OpenAIGPTModelTester(self )
snake_case = ConfigTester(self, config_class=lowercase_, n_embd=37 )
def _lowerCamelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ) -> Tuple:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*lowercase_ )
def _lowerCamelCase ( self ) -> int:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowercase_ )
def _lowerCamelCase ( self ) -> List[str]:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*lowercase_ )
def _lowerCamelCase ( self ) -> str:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowercase_ )
@slow
def _lowerCamelCase ( self ) -> List[str]:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case = OpenAIGPTModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self ) -> List[str]:
snake_case = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' )
model.to(lowercase_ )
snake_case = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=lowercase_ ) # the president is
snake_case = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
snake_case = model.generate(lowercase_, do_sample=lowercase_ )
self.assertListEqual(output_ids[0].tolist(), lowercase_ )
| 356 |
'''simple docstring'''
from __future__ import annotations
def __magic_name__ ( A ) -> list:
if len(A ) == 0:
return []
snake_case , snake_case = min(A ), max(A )
snake_case = int(max_value - min_value ) + 1
snake_case = [[] for _ in range(A )]
for i in my_list:
buckets[int(i - min_value )].append(A )
return [v for bucket in buckets for v in sorted(A )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
| 332 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase ( A__ , A__ , unittest.TestCase ):
snake_case_ = CycleDiffusionPipeline
snake_case_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'negative_prompt',
'height',
'width',
'negative_prompt_embeds',
}
snake_case_ = PipelineTesterMixin.required_optional_params - {'latents'}
snake_case_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} )
snake_case_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
snake_case_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _lowerCamelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
snake_case = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, )
snake_case = DDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule='scaled_linear', num_train_timesteps=1000, clip_sample=lowerCamelCase__, set_alpha_to_one=lowerCamelCase__, )
torch.manual_seed(0 )
snake_case = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
torch.manual_seed(0 )
snake_case = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
snake_case = CLIPTextModel(lowerCamelCase__ )
snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
snake_case = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _lowerCamelCase ( self, lowercase_, lowercase_=0 ) -> List[str]:
snake_case = floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
snake_case = image / 2 + 0.5
if str(lowerCamelCase__ ).startswith('mps' ):
snake_case = torch.manual_seed(lowerCamelCase__ )
else:
snake_case = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
snake_case = {
'prompt': 'An astronaut riding an elephant',
'source_prompt': 'An astronaut riding a horse',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'eta': 0.1,
'strength': 0.8,
'guidance_scale': 3,
'source_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def _lowerCamelCase ( self ) -> int:
snake_case = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case = self.get_dummy_components()
snake_case = CycleDiffusionPipeline(**lowerCamelCase__ )
snake_case = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
snake_case = self.get_dummy_inputs(lowerCamelCase__ )
snake_case = pipe(**lowerCamelCase__ )
snake_case = output.images
snake_case = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
snake_case = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != 'cuda', 'This test requires a GPU' )
def _lowerCamelCase ( self ) -> Optional[int]:
snake_case = self.get_dummy_components()
for name, module in components.items():
if hasattr(lowerCamelCase__, 'half' ):
snake_case = module.half()
snake_case = CycleDiffusionPipeline(**lowerCamelCase__ )
snake_case = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
snake_case = self.get_dummy_inputs(lowerCamelCase__ )
snake_case = pipe(**lowerCamelCase__ )
snake_case = output.images
snake_case = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
snake_case = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _lowerCamelCase ( self ) -> Optional[Any]:
return super().test_save_load_local()
@unittest.skip('non-deterministic pipeline' )
def _lowerCamelCase ( self ) -> Optional[Any]:
return super().test_inference_batch_single_identical()
@skip_mps
def _lowerCamelCase ( self ) -> Optional[Any]:
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _lowerCamelCase ( self ) -> List[Any]:
return super().test_save_load_optional_components()
@skip_mps
def _lowerCamelCase ( self ) -> Tuple:
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self ) -> Dict:
snake_case = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy' )
snake_case = init_image.resize((512, 512) )
snake_case = 'CompVis/stable-diffusion-v1-4'
snake_case = DDIMScheduler.from_pretrained(lowerCamelCase__, subfolder='scheduler' )
snake_case = CycleDiffusionPipeline.from_pretrained(
lowerCamelCase__, scheduler=lowerCamelCase__, safety_checker=lowerCamelCase__, torch_dtype=torch.floataa, revision='fp16' )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
snake_case = 'A black colored car'
snake_case = 'A blue colored car'
snake_case = torch.manual_seed(0 )
snake_case = pipe(
prompt=lowerCamelCase__, source_prompt=lowerCamelCase__, image=lowerCamelCase__, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=lowerCamelCase__, output_type='np', )
snake_case = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def _lowerCamelCase ( self ) -> Dict:
snake_case = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy' )
snake_case = init_image.resize((512, 512) )
snake_case = 'CompVis/stable-diffusion-v1-4'
snake_case = DDIMScheduler.from_pretrained(lowerCamelCase__, subfolder='scheduler' )
snake_case = CycleDiffusionPipeline.from_pretrained(lowerCamelCase__, scheduler=lowerCamelCase__, safety_checker=lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
snake_case = 'A black colored car'
snake_case = 'A blue colored car'
snake_case = torch.manual_seed(0 )
snake_case = pipe(
prompt=lowerCamelCase__, source_prompt=lowerCamelCase__, image=lowerCamelCase__, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=lowerCamelCase__, output_type='np', )
snake_case = output.images
assert np.abs(image - expected_image ).max() < 2E-2
| 357 |
'''simple docstring'''
def __magic_name__ ( A ) -> float:
return 1_0 - x * x
def __magic_name__ ( A , A ) -> float:
# Bolzano theory in order to find if there is a root between a and b
if equation(A ) * equation(A ) >= 0:
raise ValueError('Wrong space!' )
snake_case = a
while (b - a) >= 0.01:
# Find middle point
snake_case = (a + b) / 2
# Check if middle point is root
if equation(A ) == 0.0:
break
# Decide the side to repeat the steps
if equation(A ) * equation(A ) < 0:
snake_case = c
else:
snake_case = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 332 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase_ = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''OwlViTFeatureExtractor''']
lowerCAmelCase_ = ['''OwlViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 358 |
'''simple docstring'''
import pytest
lowerCAmelCase_ = "__dummy_dataset1__"
lowerCAmelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def __magic_name__ ( ) -> List[Any]:
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def __magic_name__ ( ) -> Union[str, Any]:
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def __magic_name__ ( A , A , A ) -> Optional[int]:
snake_case = dataset_loading_script_name
snake_case = tmp_path / 'datasets' / script_name
script_dir.mkdir(parents=A )
snake_case = script_dir / F'''{script_name}.py'''
with open(A , 'w' ) as f:
f.write(A )
return str(A )
| 332 | 0 |
'''simple docstring'''
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
lowerCAmelCase_ = "\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n"
lowerCAmelCase_ = "\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n"
lowerCAmelCase_ = "\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n"
def __magic_name__ ( A , A ) -> Dict:
return float((preds == labels).mean() )
def __magic_name__ ( A , A , A="binary" ) -> Optional[Any]:
snake_case = simple_accuracy(_lowercase , _lowercase )
snake_case = float(fa_score(y_true=_lowercase , y_pred=_lowercase , average=_lowercase ) )
return {
"accuracy": acc,
"f1": fa,
}
def __magic_name__ ( A , A ) -> List[Any]:
snake_case = {}
for id_pred, label in zip(_lowercase , _lowercase ):
snake_case = F'''{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'''
snake_case = id_pred['prediction']
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
snake_case = [(pred, label)]
snake_case , snake_case = [], []
for question, preds_labels in question_map.items():
snake_case , snake_case = zip(*_lowercase )
snake_case = fa_score(y_true=_lowercase , y_pred=_lowercase , average='macro' )
fas.append(_lowercase )
snake_case = int(sum(pred == label for pred, label in preds_labels ) == len(_lowercase ) )
ems.append(_lowercase )
snake_case = float(sum(_lowercase ) / len(_lowercase ) )
snake_case = sum(_lowercase ) / len(_lowercase )
snake_case = float(fa_score(y_true=_lowercase , y_pred=[id_pred['prediction'] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
def _lowerCamelCase ( self ) -> Optional[Any]:
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(self._get_feature_types() ), codebase_urls=[], reference_urls=[], format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None, )
def _lowerCamelCase ( self ) -> List[Any]:
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
def _lowerCamelCase ( self, lowercase_, lowercase_ ) -> Optional[int]:
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(__A, __A )}
elif self.config_name == "cb":
return acc_and_fa(__A, __A, fa_avg='macro' )
elif self.config_name == "record":
snake_case = [
{
'qas': [
{'id': ref['idx']['query'], 'answers': [{'text': ans} for ans in ref['answers']]}
for ref in references
]
}
]
snake_case = {pred['idx']['query']: pred['prediction_text'] for pred in predictions}
return evaluate_record(__A, __A )[0]
elif self.config_name == "multirc":
return evaluate_multirc(__A, __A )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(__A, __A )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
| 359 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
lowerCAmelCase_ = Lock()
def __magic_name__ ( A , A , A , A , A , A , A ) -> Any:
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 1_0 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(A )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
snake_case = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
snake_case = min(A , A )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(A )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
snake_case = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
snake_case = max(A , A )
# after all swaps are performed, send the values back to main
result_pipe[1].send(A )
def __magic_name__ ( A ) -> str:
snake_case = []
snake_case = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
snake_case = Pipe()
snake_case = Pipe()
process_array_.append(
Process(
target=A , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
snake_case = temp_rs
snake_case = temp_rr
for i in range(1 , len(A ) - 1 ):
snake_case = Pipe()
snake_case = Pipe()
process_array_.append(
Process(
target=A , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
snake_case = temp_rs
snake_case = temp_rr
process_array_.append(
Process(
target=A , args=(
len(A ) - 1,
arr[len(A ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(A ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(A ) ):
snake_case = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def __magic_name__ ( ) -> Tuple:
snake_case = list(range(1_0 , 0 , -1 ) )
print('Initial List' )
print(*A )
snake_case = odd_even_transposition(A )
print('Sorted List\n' )
print(*A )
if __name__ == "__main__":
main()
| 332 | 0 |
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __magic_name__ ( A , A ) -> List[str]:
snake_case = old_name
if "patch_embed" in old_name:
snake_case = old_name.split('.' )
if layer == "0":
snake_case = old_name.replace('0' , 'convolution1' )
elif layer == "1":
snake_case = old_name.replace('1' , 'batchnorm_before' )
elif layer == "3":
snake_case = old_name.replace('3' , 'convolution2' )
else:
snake_case = old_name.replace('4' , 'batchnorm_after' )
if "network" in old_name and re.search(R'\d\.\d' , A ):
snake_case = R"""\b\d{2}\b"""
if bool(re.search(A , A ) ):
snake_case = re.search(R'\d\.\d\d.' , A ).group()
else:
snake_case = re.search(R'\d\.\d.' , A ).group()
if int(match[0] ) < 6:
snake_case = old_name.replace(A , '' )
snake_case = trimmed_name.replace('network' , match[0] + '.meta4D_layers.blocks.' + match[2:-1] )
snake_case = """intermediate_stages.""" + trimmed_name
else:
snake_case = old_name.replace(A , '' )
if int(match[2] ) < num_meta4D_last_stage:
snake_case = trimmed_name.replace('network' , 'meta4D_layers.blocks.' + match[2] )
else:
snake_case = str(int(match[2] ) - num_meta4D_last_stage )
snake_case = trimmed_name.replace('network' , 'meta3D_layers.blocks.' + layer_index )
if "norm1" in old_name:
snake_case = trimmed_name.replace('norm1' , 'layernorm1' )
elif "norm2" in old_name:
snake_case = trimmed_name.replace('norm2' , 'layernorm2' )
elif "fc1" in old_name:
snake_case = trimmed_name.replace('fc1' , 'linear_in' )
elif "fc2" in old_name:
snake_case = trimmed_name.replace('fc2' , 'linear_out' )
snake_case = """last_stage.""" + trimmed_name
elif "network" in old_name and re.search(R'.\d.' , A ):
snake_case = old_name.replace('network' , 'intermediate_stages' )
if "fc" in new_name:
snake_case = new_name.replace('fc' , 'convolution' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
snake_case = new_name.replace('norm1' , 'batchnorm_before' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
snake_case = new_name.replace('norm2' , 'batchnorm_after' )
if "proj" in new_name:
snake_case = new_name.replace('proj' , 'projection' )
if "dist_head" in new_name:
snake_case = new_name.replace('dist_head' , 'distillation_classifier' )
elif "head" in new_name:
snake_case = new_name.replace('head' , 'classifier' )
elif "patch_embed" in new_name:
snake_case = """efficientformer.""" + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
snake_case = new_name.replace('norm' , 'layernorm' )
snake_case = """efficientformer.""" + new_name
else:
snake_case = """efficientformer.encoder.""" + new_name
return new_name
def __magic_name__ ( A , A ) -> Union[str, Any]:
for key in checkpoint.copy().keys():
snake_case = checkpoint.pop(A )
snake_case = val
return checkpoint
def __magic_name__ ( ) -> int:
snake_case = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case = Image.open(requests.get(A , stream=A ).raw )
return image
def __magic_name__ ( A , A , A , A ) -> Optional[int]:
snake_case = torch.load(A , map_location='cpu' )["""model"""]
snake_case = EfficientFormerConfig.from_json_file(A )
snake_case = EfficientFormerForImageClassificationWithTeacher(A )
snake_case = """_""".join(checkpoint_path.split('/' )[-1].split('.' )[0].split('_' )[:-1] )
snake_case = config.depths[-1] - config.num_metaad_blocks + 1
snake_case = convert_torch_checkpoint(A , A )
model.load_state_dict(A )
model.eval()
snake_case = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
# prepare image
snake_case = prepare_img()
snake_case = 2_5_6
snake_case = 2_2_4
snake_case = EfficientFormerImageProcessor(
size={'shortest_edge': image_size} , crop_size={'height': crop_size, 'width': crop_size} , resample=pillow_resamplings['bicubic'] , )
snake_case = processor(images=A , return_tensors='pt' ).pixel_values
# original processing pipeline
snake_case = Compose(
[
Resize(A , interpolation=pillow_resamplings['bicubic'] ),
CenterCrop(A ),
ToTensor(),
Normalize(A , A ),
] )
snake_case = image_transforms(A ).unsqueeze(0 )
assert torch.allclose(A , A )
snake_case = model(A )
snake_case = outputs.logits
snake_case = (1, 1_0_0_0)
if "l1" in model_name:
snake_case = torch.Tensor(
[-0.1_312, 0.4_353, -1.0_499, -0.5_124, 0.4_183, -0.6_793, -1.3_777, -0.0_893, -0.7_358, -2.4_328] )
assert torch.allclose(logits[0, :1_0] , A , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
snake_case = torch.Tensor(
[-1.3_150, -1.5_456, -1.2_556, -0.8_496, -0.7_127, -0.7_897, -0.9_728, -0.3_052, 0.3_751, -0.3_127] )
assert torch.allclose(logits[0, :1_0] , A , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
snake_case = torch.Tensor(
[-1.0_283, -1.4_131, -0.5_644, -1.3_115, -0.5_785, -1.2_049, -0.7_528, 0.1_992, -0.3_822, -0.0_878] )
assert logits.shape == expected_shape
else:
raise ValueError(
F'''Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7''' )
# Save Checkpoints
Path(A ).mkdir(exist_ok=A )
model.save_pretrained(A )
print(F'''Checkpoint successfuly converted. Model saved at {pytorch_dump_path}''' )
processor.save_pretrained(A )
print(F'''Processor successfuly saved at {pytorch_dump_path}''' )
if push_to_hub:
print('Pushing model to the hub...' )
model.push_to_hub(
repo_id=F'''Bearnardd/{pytorch_dump_path}''' , commit_message='Add model' , use_temp_dir=A , )
processor.push_to_hub(
repo_id=F'''Bearnardd/{pytorch_dump_path}''' , commit_message='Add image processor' , use_temp_dir=A , )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
lowerCAmelCase_ = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 360 |
'''simple docstring'''
from __future__ import annotations
def __magic_name__ ( A ) -> None:
create_state_space_tree(A , [] , 0 , [0 for i in range(len(A ) )] )
def __magic_name__ ( A , A , A , A , ) -> None:
if index == len(A ):
print(A )
return
for i in range(len(A ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
snake_case = True
create_state_space_tree(A , A , index + 1 , A )
current_sequence.pop()
snake_case = False
lowerCAmelCase_ = [3, 1, 2, 4]
generate_all_permutations(sequence)
lowerCAmelCase_ = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 332 | 0 |
'''simple docstring'''
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = 42
snake_case_ = jnp.floataa
snake_case_ = True
def _lowerCamelCase ( self ) -> List[Any]:
super().setup()
snake_case = nn.Dense(5, dtype=self.dtype )
def __call__( self, *lowercase_, **lowercase_ ) -> int:
snake_case = super().__call__(*lowerCamelCase_, **lowerCamelCase_ )
snake_case = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = FlaxBigBirdForNaturalQuestionsModule
def __magic_name__ ( A , A , A , A , A , A ) -> Optional[Any]:
def cross_entropy(A , A , A=None ):
snake_case = logits.shape[-1]
snake_case = (labels[..., None] == jnp.arange(_a )[None]).astype('f4' )
snake_case = jax.nn.log_softmax(_a , axis=-1 )
snake_case = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
snake_case = reduction(_a )
return loss
snake_case = partial(_a , reduction=jnp.mean )
snake_case = cross_entropy(_a , _a )
snake_case = cross_entropy(_a , _a )
snake_case = cross_entropy(_a , _a )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class lowerCamelCase :
snake_case_ = '''google/bigbird-roberta-base'''
snake_case_ = 3000
snake_case_ = 10500
snake_case_ = 128
snake_case_ = 3
snake_case_ = 1
snake_case_ = 5
# tx_args
snake_case_ = 3e-5
snake_case_ = 0.0
snake_case_ = 20000
snake_case_ = 0.0_095
snake_case_ = '''bigbird-roberta-natural-questions'''
snake_case_ = '''training-expt'''
snake_case_ = '''data/nq-training.jsonl'''
snake_case_ = '''data/nq-validation.jsonl'''
def _lowerCamelCase ( self ) -> int:
os.makedirs(self.base_dir, exist_ok=lowerCamelCase_ )
snake_case = os.path.join(self.base_dir, self.save_dir )
snake_case = self.batch_size_per_device * jax.device_count()
@dataclass
class lowerCamelCase :
snake_case_ = 42
snake_case_ = 4096 # no dynamic padding on TPUs
def __call__( self, lowercase_ ) -> Tuple:
snake_case = self.collate_fn(lowerCamelCase_ )
snake_case = jax.tree_util.tree_map(lowerCamelCase_, lowerCamelCase_ )
return batch
def _lowerCamelCase ( self, lowercase_ ) -> Dict:
snake_case , snake_case = self.fetch_inputs(features['input_ids'] )
snake_case = {
'input_ids': jnp.array(lowerCamelCase_, dtype=jnp.intaa ),
'attention_mask': jnp.array(lowerCamelCase_, dtype=jnp.intaa ),
'start_labels': jnp.array(features['start_token'], dtype=jnp.intaa ),
'end_labels': jnp.array(features['end_token'], dtype=jnp.intaa ),
'pooled_labels': jnp.array(features['category'], dtype=jnp.intaa ),
}
return batch
def _lowerCamelCase ( self, lowercase_ ) -> Union[str, Any]:
snake_case = [self._fetch_inputs(lowerCamelCase_ ) for ids in input_ids]
return zip(*lowerCamelCase_ )
def _lowerCamelCase ( self, lowercase_ ) -> Tuple:
snake_case = [1 for _ in range(len(lowerCamelCase_ ) )]
while len(lowerCamelCase_ ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def __magic_name__ ( A , A , A=None ) -> Any:
if seed is not None:
snake_case = dataset.shuffle(seed=_a )
for i in range(len(_a ) // batch_size ):
snake_case = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(_a )
@partial(jax.pmap , axis_name='batch' )
def __magic_name__ ( A , A , **A ) -> List[str]:
def loss_fn(A ):
snake_case = model_inputs.pop('start_labels' )
snake_case = model_inputs.pop('end_labels' )
snake_case = model_inputs.pop('pooled_labels' )
snake_case = state.apply_fn(**_a , params=_a , dropout_rng=_a , train=_a )
snake_case , snake_case , snake_case = outputs
return state.loss_fn(
_a , _a , _a , _a , _a , _a , )
snake_case , snake_case = jax.random.split(_a )
snake_case = jax.value_and_grad(_a )
snake_case , snake_case = grad_fn(state.params )
snake_case = jax.lax.pmean({'loss': loss} , axis_name='batch' )
snake_case = jax.lax.pmean(_a , 'batch' )
snake_case = state.apply_gradients(grads=_a )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name='batch' )
def __magic_name__ ( A , **A ) -> Any:
snake_case = model_inputs.pop('start_labels' )
snake_case = model_inputs.pop('end_labels' )
snake_case = model_inputs.pop('pooled_labels' )
snake_case = state.apply_fn(**_a , params=state.params , train=_a )
snake_case , snake_case , snake_case = outputs
snake_case = state.loss_fn(_a , _a , _a , _a , _a , _a )
snake_case = jax.lax.pmean({'loss': loss} , axis_name='batch' )
return metrics
class lowerCamelCase ( train_state.TrainState ):
snake_case_ = struct.field(pytree_node=__lowerCAmelCase )
@dataclass
class lowerCamelCase :
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = None
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_=None ) -> List[str]:
snake_case = model.params
snake_case = TrainState.create(
apply_fn=model.__call__, params=lowerCamelCase_, tx=lowerCamelCase_, loss_fn=lowerCamelCase_, )
if ckpt_dir is not None:
snake_case , snake_case , snake_case , snake_case , snake_case = restore_checkpoint(lowerCamelCase_, lowerCamelCase_ )
snake_case = {
'lr': args.lr,
'init_lr': args.init_lr,
'warmup_steps': args.warmup_steps,
'num_train_steps': num_train_steps,
'weight_decay': args.weight_decay,
}
snake_case , snake_case = build_tx(**lowerCamelCase_ )
snake_case = train_state.TrainState(
step=lowerCamelCase_, apply_fn=model.__call__, params=lowerCamelCase_, tx=lowerCamelCase_, opt_state=lowerCamelCase_, )
snake_case = args
snake_case = data_collator
snake_case = lr
snake_case = params
snake_case = jax_utils.replicate(lowerCamelCase_ )
return state
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_ ) -> Any:
snake_case = self.args
snake_case = len(lowerCamelCase_ ) // args.batch_size
snake_case = jax.random.PRNGKey(0 )
snake_case = jax.random.split(lowerCamelCase_, jax.device_count() )
for epoch in range(args.max_epochs ):
snake_case = jnp.array(0, dtype=jnp.floataa )
snake_case = get_batched_dataset(lowerCamelCase_, args.batch_size, seed=lowerCamelCase_ )
snake_case = 0
for batch in tqdm(lowerCamelCase_, total=lowerCamelCase_, desc=F'''Running EPOCH-{epoch}''' ):
snake_case = self.data_collator(lowerCamelCase_ )
snake_case , snake_case , snake_case = self.train_step_fn(lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ )
running_loss += jax_utils.unreplicate(metrics['loss'] )
i += 1
if i % args.logging_steps == 0:
snake_case = jax_utils.unreplicate(state.step )
snake_case = running_loss.item() / i
snake_case = self.scheduler_fn(state_step - 1 )
snake_case = self.evaluate(lowerCamelCase_, lowerCamelCase_ )
snake_case = {
'step': state_step.item(),
'eval_loss': eval_loss.item(),
'tr_loss': tr_loss,
'lr': lr.item(),
}
tqdm.write(str(lowerCamelCase_ ) )
self.logger.log(lowerCamelCase_, commit=lowerCamelCase_ )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + F'''-e{epoch}-s{i}''', state=lowerCamelCase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_ ) -> int:
snake_case = get_batched_dataset(lowerCamelCase_, self.args.batch_size )
snake_case = len(lowerCamelCase_ ) // self.args.batch_size
snake_case = jnp.array(0, dtype=jnp.floataa )
snake_case = 0
for batch in tqdm(lowerCamelCase_, total=lowerCamelCase_, desc='Evaluating ... ' ):
snake_case = self.data_collator(lowerCamelCase_ )
snake_case = self.val_step_fn(lowerCamelCase_, **lowerCamelCase_ )
running_loss += jax_utils.unreplicate(metrics['loss'] )
i += 1
return running_loss / i
def _lowerCamelCase ( self, lowercase_, lowercase_ ) -> List[Any]:
snake_case = jax_utils.unreplicate(lowerCamelCase_ )
print(F'''SAVING CHECKPOINT IN {save_dir}''', end=' ... ' )
self.model_save_fn(lowerCamelCase_, params=state.params )
with open(os.path.join(lowerCamelCase_, 'opt_state.msgpack' ), 'wb' ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args, os.path.join(lowerCamelCase_, 'args.joblib' ) )
joblib.dump(self.data_collator, os.path.join(lowerCamelCase_, 'data_collator.joblib' ) )
with open(os.path.join(lowerCamelCase_, 'training_state.json' ), 'w' ) as f:
json.dump({'step': state.step.item()}, lowerCamelCase_ )
print('DONE' )
def __magic_name__ ( A , A ) -> Dict:
print(F'''RESTORING CHECKPOINT FROM {save_dir}''' , end=' ... ' )
with open(os.path.join(_a , 'flax_model.msgpack' ) , 'rb' ) as f:
snake_case = from_bytes(state.params , f.read() )
with open(os.path.join(_a , 'opt_state.msgpack' ) , 'rb' ) as f:
snake_case = from_bytes(state.opt_state , f.read() )
snake_case = joblib.load(os.path.join(_a , 'args.joblib' ) )
snake_case = joblib.load(os.path.join(_a , 'data_collator.joblib' ) )
with open(os.path.join(_a , 'training_state.json' ) , 'r' ) as f:
snake_case = json.load(_a )
snake_case = training_state['step']
print('DONE' )
return params, opt_state, step, args, data_collator
def __magic_name__ ( A , A , A , A ) -> Union[str, Any]:
snake_case = num_train_steps - warmup_steps
snake_case = optax.linear_schedule(init_value=_a , end_value=_a , transition_steps=_a )
snake_case = optax.linear_schedule(init_value=_a , end_value=1E-7 , transition_steps=_a )
snake_case = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
def __magic_name__ ( A , A , A , A , A ) -> Any:
def weight_decay_mask(A ):
snake_case = traverse_util.flatten_dict(_a )
snake_case = {k: (v[-1] != 'bias' and v[-2:] != ('LayerNorm', 'scale')) for k, v in params.items()}
return traverse_util.unflatten_dict(_a )
snake_case = scheduler_fn(_a , _a , _a , _a )
snake_case = optax.adamw(learning_rate=_a , weight_decay=_a , mask=_a )
return tx, lr
| 361 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''roberta'''
def __init__( self, lowercase_=50265, lowercase_=768, lowercase_=12, lowercase_=12, lowercase_=3072, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=2, lowercase_=0.02, lowercase_=1E-12, lowercase_=1, lowercase_=0, lowercase_=2, lowercase_="absolute", lowercase_=True, lowercase_=None, **lowercase_, ) -> Tuple:
super().__init__(pad_token_id=lowercase_, bos_token_id=lowercase_, eos_token_id=lowercase_, **lowercase_ )
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = hidden_act
snake_case = intermediate_size
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = type_vocab_size
snake_case = initializer_range
snake_case = layer_norm_eps
snake_case = position_embedding_type
snake_case = use_cache
snake_case = classifier_dropout
class lowerCamelCase ( __lowerCAmelCase ):
@property
def _lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
snake_case = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 332 | 0 |
'''simple docstring'''
def __magic_name__ ( ) -> Tuple:
snake_case = []
snake_case = 1
while len(__lowerCAmelCase ) < 1E6:
constant.append(str(__lowerCAmelCase ) )
i += 1
snake_case = ''''''.join(__lowerCAmelCase )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[9_9] )
* int(constant[9_9_9] )
* int(constant[9_9_9_9] )
* int(constant[9_9_9_9_9] )
* int(constant[9_9_9_9_9_9] )
)
if __name__ == "__main__":
print(solution())
| 362 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowerCAmelCase_ = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
lowerCAmelCase_ = {
"allenai/led-base-16384": 1_6_3_8_4,
}
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = LEDTokenizer
snake_case_ = ['''input_ids''', '''attention_mask''']
def __init__( self, lowercase_=None, lowercase_=None, lowercase_=None, lowercase_="replace", lowercase_="<s>", lowercase_="</s>", lowercase_="</s>", lowercase_="<s>", lowercase_="<unk>", lowercase_="<pad>", lowercase_="<mask>", lowercase_=False, lowercase_=True, **lowercase_, ) -> int:
super().__init__(
lowercase_, lowercase_, tokenizer_file=lowercase_, errors=lowercase_, bos_token=lowercase_, eos_token=lowercase_, sep_token=lowercase_, cls_token=lowercase_, unk_token=lowercase_, pad_token=lowercase_, mask_token=lowercase_, add_prefix_space=lowercase_, trim_offsets=lowercase_, **lowercase_, )
snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space', lowercase_ ) != add_prefix_space:
snake_case = getattr(lowercase_, pre_tok_state.pop('type' ) )
snake_case = add_prefix_space
snake_case = pre_tok_class(**lowercase_ )
snake_case = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
snake_case = 'post_processor'
snake_case = getattr(self.backend_tokenizer, lowercase_, lowercase_ )
if tokenizer_component_instance:
snake_case = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
snake_case = tuple(state['sep'] )
if "cls" in state:
snake_case = tuple(state['cls'] )
snake_case = False
if state.get('add_prefix_space', lowercase_ ) != add_prefix_space:
snake_case = add_prefix_space
snake_case = True
if state.get('trim_offsets', lowercase_ ) != trim_offsets:
snake_case = trim_offsets
snake_case = True
if changes_to_apply:
snake_case = getattr(lowercase_, state.pop('type' ) )
snake_case = component_class(**lowercase_ )
setattr(self.backend_tokenizer, lowercase_, lowercase_ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def _lowerCamelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def _lowerCamelCase ( self, lowercase_ ) -> Any:
snake_case = AddedToken(lowercase_, lstrip=lowercase_, rstrip=lowercase_ ) if isinstance(lowercase_, lowercase_ ) else value
snake_case = value
def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding:
snake_case = kwargs.get('is_split_into_words', lowercase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*lowercase_, **lowercase_ )
def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding:
snake_case = kwargs.get('is_split_into_words', lowercase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._encode_plus(*lowercase_, **lowercase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> Tuple[str]:
snake_case = self._tokenizer.model.save(lowercase_, name=lowercase_ )
return tuple(lowercase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_=None ) -> Dict:
snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> List[int]:
snake_case = [self.sep_token_id]
snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCamelCase ( self, lowercase_, lowercase_ = None, lowercase_ = PaddingStrategy.DO_NOT_PAD, lowercase_ = None, lowercase_ = None, ) -> dict:
snake_case = super()._pad(
encoded_inputs=lowercase_, max_length=lowercase_, padding_strategy=lowercase_, pad_to_multiple_of=lowercase_, return_attention_mask=lowercase_, )
# Load from model defaults
if return_attention_mask is None:
snake_case = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
snake_case = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
snake_case = len(encoded_inputs['global_attention_mask'] ) != len(lowercase_ )
if needs_to_be_padded:
snake_case = len(lowercase_ ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
snake_case = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
snake_case = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
| 332 | 0 |
'''simple docstring'''
import math
from datetime import datetime, timedelta
def __magic_name__ ( A ) -> datetime:
snake_case = year % 1_9
snake_case = year % 4
snake_case = year % 7
snake_case = math.floor(year / 1_0_0 )
snake_case = math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 )
snake_case = leap_day_inhibits / 4
snake_case = (
1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 3_0
snake_case = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
snake_case = (1_9 * metonic_cycle + secular_moon_shift) % 3_0
# PHM -> Paschal Full Moon
snake_case = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
return datetime(_lowercase , 4 , 1_9 )
elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
return datetime(_lowercase , 4 , 1_8 )
else:
return datetime(_lowercase , 3 , 2_2 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
lowerCAmelCase_ = '''will be''' if year > datetime.now().year else '''was'''
print(f"Easter in {year} {tense} {gauss_easter(year)}")
| 363 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __magic_name__ ( A ) -> Tuple:
snake_case = []
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
F'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
F'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
F'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
F'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def __magic_name__ ( A , A ) -> Optional[int]:
snake_case = []
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def __magic_name__ ( A ) -> List[Any]:
snake_case = []
token.append((F'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') )
return token
def __magic_name__ ( ) -> Dict:
snake_case = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def __magic_name__ ( A , A , A , A ) -> int:
snake_case = 'imagenet-1k-id2label.json'
snake_case = 1_0_0_0
snake_case = 'huggingface/label-files'
snake_case = num_labels
snake_case = json.load(open(cached_download(hf_hub_url(A , A , repo_type='dataset' ) ) , 'r' ) )
snake_case = {int(A ): v for k, v in idalabel.items()}
snake_case = idalabel
snake_case = {v: k for k, v in idalabel.items()}
snake_case = snake_case = CvtConfig(num_labels=A , idalabel=A , labelaid=A )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13":
snake_case = [1, 2, 1_0]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21":
snake_case = [1, 4, 1_6]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
else:
snake_case = [2, 2, 2_0]
snake_case = [3, 1_2, 1_6]
snake_case = [1_9_2, 7_6_8, 1_0_2_4]
snake_case = CvtForImageClassification(A )
snake_case = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
snake_case = image_size
snake_case = torch.load(A , map_location=torch.device('cpu' ) )
snake_case = OrderedDict()
snake_case = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
snake_case = list_of_state_dict + cls_token(A )
snake_case = list_of_state_dict + embeddings(A )
for cnt in range(config.depth[idx] ):
snake_case = list_of_state_dict + attention(A , A )
snake_case = list_of_state_dict + final()
for gg in list_of_state_dict:
print(A )
for i in range(len(A ) ):
snake_case = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(A )
model.save_pretrained(A )
image_processor.save_pretrained(A )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=3_8_4,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
lowerCAmelCase_ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 332 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
"""configuration_upernet""": ["""UperNetConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""UperNetForSemanticSegmentation""",
"""UperNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 364 |
'''simple docstring'''
from pathlib import Path
import fire
def __magic_name__ ( A , A , A ) -> Union[str, Any]:
snake_case = Path(A )
snake_case = Path(A )
dest_dir.mkdir(exist_ok=A )
for path in src_dir.iterdir():
snake_case = [x.rstrip() for x in list(path.open().readlines() )][:n]
snake_case = dest_dir.joinpath(path.name )
print(A )
dest_path.open('w' ).write('\n'.join(A ) )
if __name__ == "__main__":
fire.Fire(minify)
| 332 | 0 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
snake_case_ = IFInpaintingPipeline
snake_case_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
snake_case_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
snake_case_ = PipelineTesterMixin.required_optional_params - {'''latents'''}
def _lowerCamelCase ( self ) -> Union[str, Any]:
return self._get_dummy_components()
def _lowerCamelCase ( self, lowercase_, lowercase_=0 ) -> Optional[Any]:
if str(lowerCAmelCase_ ).startswith('mps' ):
snake_case = torch.manual_seed(lowerCAmelCase_ )
else:
snake_case = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
snake_case = floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ )
snake_case = floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ )
snake_case = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
def _lowerCamelCase ( self ) -> str:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def _lowerCamelCase ( self ) -> Dict:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA' )
def _lowerCamelCase ( self ) -> int:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def _lowerCamelCase ( self ) -> Union[str, Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _lowerCamelCase ( self ) -> Dict:
self._test_save_load_local()
def _lowerCamelCase ( self ) -> Optional[Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2, )
| 365 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
lowerCAmelCase_ = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def __magic_name__ ( A , A ) -> Union[str, Any]:
inspect_dataset(A , A )
snake_case = path + '.py'
assert script_name in os.listdir(A )
assert "__pycache__" not in os.listdir(A )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def __magic_name__ ( A , A ) -> int:
inspect_metric(A , A )
snake_case = path + '.py'
assert script_name in os.listdir(A )
assert "__pycache__" not in os.listdir(A )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def __magic_name__ ( A , A , A ) -> List[str]:
snake_case = get_dataset_config_info(A , config_name=A )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def __magic_name__ ( A , A , A ) -> Any:
with pytest.raises(A ):
get_dataset_config_info(A , config_name=A )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def __magic_name__ ( A , A ) -> Dict:
snake_case = get_dataset_config_names(A )
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def __magic_name__ ( A , A , A ) -> List[str]:
snake_case = get_dataset_infos(A )
assert list(infos.keys() ) == expected_configs
snake_case = expected_configs[0]
assert expected_config in infos
snake_case = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def __magic_name__ ( A , A , A ) -> Any:
snake_case = get_dataset_infos(A )
assert expected_config in infos
snake_case = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def __magic_name__ ( A , A , A ) -> int:
with pytest.raises(A ):
get_dataset_split_names(A , config_name=A )
| 332 | 0 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowerCAmelCase_ = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class lowerCamelCase ( unittest.TestCase ):
snake_case_ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
snake_case_ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
snake_case_ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
snake_case_ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_ ) -> Dict:
snake_case = ZeroShotClassificationPipeline(
model=SCREAMING_SNAKE_CASE_, tokenizer=SCREAMING_SNAKE_CASE_, candidate_labels=['polics', 'health'] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def _lowerCamelCase ( self, lowercase_, lowercase_ ) -> Dict:
snake_case = classifier('Who are you voting for in 2020?', candidate_labels='politics' )
self.assertEqual(SCREAMING_SNAKE_CASE_, {'sequence': ANY(SCREAMING_SNAKE_CASE_ ), 'labels': [ANY(SCREAMING_SNAKE_CASE_ )], 'scores': [ANY(SCREAMING_SNAKE_CASE_ )]} )
# No kwarg
snake_case = classifier('Who are you voting for in 2020?', ['politics'] )
self.assertEqual(SCREAMING_SNAKE_CASE_, {'sequence': ANY(SCREAMING_SNAKE_CASE_ ), 'labels': [ANY(SCREAMING_SNAKE_CASE_ )], 'scores': [ANY(SCREAMING_SNAKE_CASE_ )]} )
snake_case = classifier('Who are you voting for in 2020?', candidate_labels=['politics'] )
self.assertEqual(SCREAMING_SNAKE_CASE_, {'sequence': ANY(SCREAMING_SNAKE_CASE_ ), 'labels': [ANY(SCREAMING_SNAKE_CASE_ )], 'scores': [ANY(SCREAMING_SNAKE_CASE_ )]} )
snake_case = classifier('Who are you voting for in 2020?', candidate_labels='politics, public health' )
self.assertEqual(
SCREAMING_SNAKE_CASE_, {'sequence': ANY(SCREAMING_SNAKE_CASE_ ), 'labels': [ANY(SCREAMING_SNAKE_CASE_ ), ANY(SCREAMING_SNAKE_CASE_ )], 'scores': [ANY(SCREAMING_SNAKE_CASE_ ), ANY(SCREAMING_SNAKE_CASE_ )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ), 1.0 )
snake_case = classifier('Who are you voting for in 2020?', candidate_labels=['politics', 'public health'] )
self.assertEqual(
SCREAMING_SNAKE_CASE_, {'sequence': ANY(SCREAMING_SNAKE_CASE_ ), 'labels': [ANY(SCREAMING_SNAKE_CASE_ ), ANY(SCREAMING_SNAKE_CASE_ )], 'scores': [ANY(SCREAMING_SNAKE_CASE_ ), ANY(SCREAMING_SNAKE_CASE_ )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ), 1.0 )
snake_case = classifier(
'Who are you voting for in 2020?', candidate_labels='politics', hypothesis_template='This text is about {}' )
self.assertEqual(SCREAMING_SNAKE_CASE_, {'sequence': ANY(SCREAMING_SNAKE_CASE_ ), 'labels': [ANY(SCREAMING_SNAKE_CASE_ )], 'scores': [ANY(SCREAMING_SNAKE_CASE_ )]} )
# https://github.com/huggingface/transformers/issues/13846
snake_case = classifier(['I am happy'], ['positive', 'negative'] )
self.assertEqual(
SCREAMING_SNAKE_CASE_, [
{'sequence': ANY(SCREAMING_SNAKE_CASE_ ), 'labels': [ANY(SCREAMING_SNAKE_CASE_ ), ANY(SCREAMING_SNAKE_CASE_ )], 'scores': [ANY(SCREAMING_SNAKE_CASE_ ), ANY(SCREAMING_SNAKE_CASE_ )]}
for i in range(1 )
], )
snake_case = classifier(['I am happy', 'I am sad'], ['positive', 'negative'] )
self.assertEqual(
SCREAMING_SNAKE_CASE_, [
{'sequence': ANY(SCREAMING_SNAKE_CASE_ ), 'labels': [ANY(SCREAMING_SNAKE_CASE_ ), ANY(SCREAMING_SNAKE_CASE_ )], 'scores': [ANY(SCREAMING_SNAKE_CASE_ ), ANY(SCREAMING_SNAKE_CASE_ )]}
for i in range(2 )
], )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
classifier('', candidate_labels='politics' )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
classifier(SCREAMING_SNAKE_CASE_, candidate_labels='politics' )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
classifier('Who are you voting for in 2020?', candidate_labels='' )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
classifier('Who are you voting for in 2020?', candidate_labels=SCREAMING_SNAKE_CASE_ )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
classifier(
'Who are you voting for in 2020?', candidate_labels='politics', hypothesis_template='Not formatting template', )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
classifier(
'Who are you voting for in 2020?', candidate_labels='politics', hypothesis_template=SCREAMING_SNAKE_CASE_, )
self.run_entailment_id(SCREAMING_SNAKE_CASE_ )
def _lowerCamelCase ( self, lowercase_ ) -> Union[str, Any]:
snake_case = zero_shot_classifier.model.config
snake_case = config.labelaid
snake_case = zero_shot_classifier.entailment_id
snake_case = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2}
self.assertEqual(zero_shot_classifier.entailment_id, -1 )
snake_case = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2}
self.assertEqual(zero_shot_classifier.entailment_id, 0 )
snake_case = {"""ENTAIL""": 0, """NON-ENTAIL""": 1}
self.assertEqual(zero_shot_classifier.entailment_id, 0 )
snake_case = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0}
self.assertEqual(zero_shot_classifier.entailment_id, 2 )
snake_case = original_labelaid
self.assertEqual(SCREAMING_SNAKE_CASE_, zero_shot_classifier.entailment_id )
@require_torch
def _lowerCamelCase ( self ) -> str:
snake_case = pipeline(
'zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='pt', )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'Who are you voting for in 2020?' * 100, candidate_labels=['politics', 'public health', 'science'] )
@require_torch
def _lowerCamelCase ( self ) -> Union[str, Any]:
snake_case = pipeline(
'zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='pt', )
snake_case = zero_shot_classifier(
'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ), {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.333, 0.333, 0.333],
}, )
@require_tf
def _lowerCamelCase ( self ) -> int:
snake_case = pipeline(
'zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='tf', )
snake_case = zero_shot_classifier(
'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ), {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.333, 0.333, 0.333],
}, )
@slow
@require_torch
def _lowerCamelCase ( self ) -> Optional[int]:
snake_case = pipeline('zero-shot-classification', model='roberta-large-mnli', framework='pt' )
snake_case = zero_shot_classifier(
'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ), {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.976, 0.015, 0.009],
}, )
snake_case = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
' large and limited training data.', candidate_labels=['machine learning', 'statistics', 'translation', 'vision'], multi_label=SCREAMING_SNAKE_CASE_, )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ), {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.817, 0.713, 0.018, 0.018],
}, )
@slow
@require_tf
def _lowerCamelCase ( self ) -> Optional[int]:
snake_case = pipeline('zero-shot-classification', model='roberta-large-mnli', framework='tf' )
snake_case = zero_shot_classifier(
'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ), {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.976, 0.015, 0.009],
}, )
snake_case = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
' large and limited training data.', candidate_labels=['machine learning', 'statistics', 'translation', 'vision'], multi_label=SCREAMING_SNAKE_CASE_, )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ), {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.817, 0.713, 0.018, 0.018],
}, )
| 366 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 332 | 0 |
'''simple docstring'''
def __magic_name__ ( A ) -> Optional[Any]:
snake_case = min(SCREAMING_SNAKE_CASE__ ) # min() finds the minimum value
snake_case = max(SCREAMING_SNAKE_CASE__ ) # max() finds the maximum value
snake_case = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
snake_case = [0] * size
# Populate the pigeonholes.
for x in a:
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), "integers only please"
holes[x - min_val] += 1
# Putting the elements back into the array in an order.
snake_case = 0
for count in range(SCREAMING_SNAKE_CASE__ ):
while holes[count] > 0:
holes[count] -= 1
snake_case = count + min_val
i += 1
def __magic_name__ ( ) -> Union[str, Any]:
snake_case = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(SCREAMING_SNAKE_CASE__ )
print('Sorted order is:' , ' '.join(SCREAMING_SNAKE_CASE__ ) )
if __name__ == "__main__":
main()
| 367 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowerCAmelCase_ = False
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowerCamelCase ( self ) -> List[Any]:
return 12
@property
def _lowerCamelCase ( self ) -> Dict:
return 12
@property
def _lowerCamelCase ( self ) -> List[Any]:
return 32
@property
def _lowerCamelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
snake_case = VQModel(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3, )
return model
@property
def _lowerCamelCase ( self ) -> List[Any]:
snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _lowerCamelCase ( self ) -> Tuple:
torch.manual_seed(0 )
snake_case = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
return CLIPTextModel(lowercase_ )
@property
def _lowerCamelCase ( self ) -> str:
torch.manual_seed(0 )
snake_case = 12
snake_case = 12
snake_case = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
snake_case = TransformeraDModel(**lowercase_ )
return model
def _lowerCamelCase ( self ) -> Tuple:
snake_case = 'cpu'
snake_case = self.dummy_vqvae
snake_case = self.dummy_text_encoder
snake_case = self.dummy_tokenizer
snake_case = self.dummy_transformer
snake_case = VQDiffusionScheduler(self.num_embed )
snake_case = LearnedClassifierFreeSamplingEmbeddings(learnable=lowercase_ )
snake_case = VQDiffusionPipeline(
vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, )
snake_case = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case = 'teddy bear playing in the pool'
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' )
snake_case = output.images
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe(
[prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case = 'cpu'
snake_case = self.dummy_vqvae
snake_case = self.dummy_text_encoder
snake_case = self.dummy_tokenizer
snake_case = self.dummy_transformer
snake_case = VQDiffusionScheduler(self.num_embed )
snake_case = LearnedClassifierFreeSamplingEmbeddings(
learnable=lowercase_, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length )
snake_case = VQDiffusionPipeline(
vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, )
snake_case = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case = 'teddy bear playing in the pool'
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' )
snake_case = output.images
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe(
[prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self ) -> str:
snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
snake_case = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
snake_case = pipeline.to(lowercase_ )
pipeline.set_progress_bar_config(disable=lowercase_ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipeline(
'teddy bear playing in the pool', num_images_per_prompt=1, generator=lowercase_, output_type='np', )
snake_case = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 332 | 0 |
'''simple docstring'''
from __future__ import annotations
class lowerCamelCase :
def __init__( self, lowercase_, lowercase_ ) -> Union[str, Any]:
snake_case = text, pattern
snake_case = len(lowercase_ ), len(lowercase_ )
def _lowerCamelCase ( self, lowercase_ ) -> int:
for i in range(self.patLen - 1, -1, -1 ):
if char == self.pattern[i]:
return i
return -1
def _lowerCamelCase ( self, lowercase_ ) -> int:
for i in range(self.patLen - 1, -1, -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def _lowerCamelCase ( self ) -> list[int]:
snake_case = []
for i in range(self.textLen - self.patLen + 1 ):
snake_case = self.mismatch_in_text(lowercase_ )
if mismatch_index == -1:
positions.append(lowercase_ )
else:
snake_case = self.match_in_pattern(self.text[mismatch_index] )
snake_case = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
lowerCAmelCase_ = "ABAABA"
lowerCAmelCase_ = "AB"
lowerCAmelCase_ = BoyerMooreSearch(text, pattern)
lowerCAmelCase_ = bms.bad_character_heuristic()
if len(positions) == 0:
print("No match found")
else:
print("Pattern found in following positions: ")
print(positions)
| 368 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase ( metaclass=__lowerCAmelCase ):
snake_case_ = ['''note_seq''']
def __init__( self, *lowercase_, **lowercase_ ) -> str:
requires_backends(self, ['note_seq'] )
@classmethod
def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
requires_backends(cls, ['note_seq'] )
@classmethod
def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
requires_backends(cls, ['note_seq'] )
| 332 | 0 |
'''simple docstring'''
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
lowerCAmelCase_ = 2_9_9_7_9_2_4_5_8
# Symbols
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = symbols("ct x y z")
def __magic_name__ ( A ) -> List[str]:
if velocity > c:
raise ValueError('Speed must not exceed light speed 299,792,458 [m/s]!' )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError('Speed must be greater than or equal to 1!' )
return velocity / c
def __magic_name__ ( A ) -> Tuple:
return 1 / sqrt(1 - beta(__UpperCamelCase ) ** 2 )
def __magic_name__ ( A ) -> int:
return np.array(
[
[gamma(__UpperCamelCase ), -gamma(__UpperCamelCase ) * beta(__UpperCamelCase ), 0, 0],
[-gamma(__UpperCamelCase ) * beta(__UpperCamelCase ), gamma(__UpperCamelCase ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def __magic_name__ ( A , A = None ) -> List[str]:
if event is None:
snake_case = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(__UpperCamelCase ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
lowerCAmelCase_ = transform(2_9_9_7_9_2_4_5)
print("Example of four vector: ")
print(f"ct\' = {four_vector[0]}")
print(f"x\' = {four_vector[1]}")
print(f"y\' = {four_vector[2]}")
print(f"z\' = {four_vector[3]}")
# Substitute symbols with numerical values
lowerCAmelCase_ = {ct: c, x: 1, y: 1, z: 1}
lowerCAmelCase_ = [four_vector[i].subs(sub_dict) for i in range(4)]
print(f"\n{numerical_vector}")
| 369 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase ( __lowerCAmelCase ):
def __init__( self, *lowercase_, **lowercase_ ) -> None:
warnings.warn(
'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DPTImageProcessor instead.', lowercase_, )
super().__init__(*lowercase_, **lowercase_ )
| 332 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase_ = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 370 |
'''simple docstring'''
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
lowerCAmelCase_ = pd.read_csv(
"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
"position_salaries.csv"
)
lowerCAmelCase_ = dataset.iloc[:, 1:2].values
lowerCAmelCase_ = dataset.iloc[:, 2].values
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = train_test_split(X, y, test_size=0.2, random_state=0)
lowerCAmelCase_ = PolynomialFeatures(degree=4)
lowerCAmelCase_ = poly_reg.fit_transform(X)
lowerCAmelCase_ = LinearRegression()
pol_reg.fit(X_poly, y)
def __magic_name__ ( ) -> Any:
plt.scatter(A , A , color='red' )
plt.plot(A , pol_reg.predict(poly_reg.fit_transform(A ) ) , color='blue' )
plt.title('Truth or Bluff (Linear Regression)' )
plt.xlabel('Position level' )
plt.ylabel('Salary' )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 332 | 0 |
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def __magic_name__ ( A , A , **A ) -> Tuple:
snake_case = AutoConfig.from_pretrained(a__ , **a__ )
snake_case = AutoModelForSeqaSeqLM.from_config(a__ )
model.save_pretrained(a__ )
AutoTokenizer.from_pretrained(a__ ).save_pretrained(a__ )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 371 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = ''''''
snake_case_ = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
snake_case_ = None # compression type in fsspec. ex: "gzip"
snake_case_ = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self, lowercase_ = "", lowercase_ = None, lowercase_ = None, **lowercase_ ) -> str:
super().__init__(self, **lowercase_ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
snake_case = fsspec.open(
lowercase_, mode='rb', protocol=lowercase_, compression=self.compression, client_kwargs={
'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459
'trust_env': True, # Enable reading proxy env variables.
**(target_options or {}).pop('client_kwargs', {} ), # To avoid issues if it was already passed.
}, **(target_options or {}), )
snake_case = os.path.basename(self.file.path.split('::' )[0] )
snake_case = (
self.compressed_name[: self.compressed_name.rindex('.' )]
if '.' in self.compressed_name
else self.compressed_name
)
snake_case = None
@classmethod
def _lowerCamelCase ( cls, lowercase_ ) -> Any:
# compressed file paths are always relative to the archive root
return super()._strip_protocol(lowercase_ ).lstrip('/' )
def _lowerCamelCase ( self ) -> Optional[Any]:
if self.dir_cache is None:
snake_case = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name}
snake_case = {f['name']: f}
def _lowerCamelCase ( self, lowercase_ ) -> str:
return self.file.open().read()
def _lowerCamelCase ( self, lowercase_, lowercase_ = "rb", lowercase_=None, lowercase_=True, lowercase_=None, **lowercase_, ) -> Any:
snake_case = self._strip_protocol(lowercase_ )
if mode != "rb":
raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
return self.file.open()
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''bz2'''
snake_case_ = '''bz2'''
snake_case_ = '''.bz2'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''gzip'''
snake_case_ = '''gzip'''
snake_case_ = '''.gz'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''lz4'''
snake_case_ = '''lz4'''
snake_case_ = '''.lz4'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''xz'''
snake_case_ = '''xz'''
snake_case_ = '''.xz'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''zstd'''
snake_case_ = '''zstd'''
snake_case_ = '''.zst'''
def __init__( self, lowercase_, lowercase_ = "rb", lowercase_ = None, lowercase_ = None, lowercase_ = DEFAULT_BLOCK_SIZE, **lowercase_, ) -> Union[str, Any]:
super().__init__(
fo=lowercase_, mode=lowercase_, target_protocol=lowercase_, target_options=lowercase_, block_size=lowercase_, **lowercase_, )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
snake_case = self.file.__enter__
class lowerCamelCase :
def __init__( self, lowercase_ ) -> List[Any]:
snake_case = file_
def __enter__( self ) -> Dict:
self._file.__enter__()
return self
def __exit__( self, *lowercase_, **lowercase_ ) -> Dict:
self._file.__exit__(*lowercase_, **lowercase_ )
def __iter__( self ) -> List[str]:
return iter(self._file )
def _lowerCamelCase ( self ) -> List[str]:
return next(self._file )
def __getattr__( self, lowercase_ ) -> List[Any]:
return getattr(self._file, lowercase_ )
def fixed_enter(*lowercase_, **lowercase_ ):
return WrappedFile(_enter(*lowercase_, **lowercase_ ) )
snake_case = fixed_enter
| 332 | 0 |
'''simple docstring'''
def __magic_name__ ( A , A ) -> List[Any]:
return "\n".join(
F'''{number} * {i} = {number * i}''' for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=1_0))
| 350 |
'''simple docstring'''
from __future__ import annotations
def __magic_name__ ( A , A , A ) -> int | float:
if len(A ) == 0:
raise ValueError('find_max() arg is an empty sequence' )
if (
left >= len(A )
or left < -len(A )
or right >= len(A )
or right < -len(A )
):
raise IndexError('list index out of range' )
if left == right:
return nums[left]
snake_case = (left + right) >> 1 # the middle
snake_case = find_max(A , A , A ) # find max in range[left, mid]
snake_case = find_max(A , mid + 1 , A ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 332 | 0 |
'''simple docstring'''
class lowerCamelCase :
def __init__( self, lowercase_ ) -> None:
snake_case = len(lowercase_ )
snake_case = [0] * len_array
if len_array > 0:
snake_case = array[0]
for i in range(1, lowercase_ ):
snake_case = self.prefix_sum[i - 1] + array[i]
def _lowerCamelCase ( self, lowercase_, lowercase_ ) -> int:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def _lowerCamelCase ( self, lowercase_ ) -> bool:
snake_case = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(lowercase_ )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 351 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = 42
class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase ):
@register_to_config
def __init__( self, lowercase_ = 3, lowercase_ = 3, lowercase_ = ("DownEncoderBlock2D",), lowercase_ = ("UpDecoderBlock2D",), lowercase_ = (64,), lowercase_ = 1, lowercase_ = "silu", lowercase_ = 3, lowercase_ = 32, lowercase_ = 256, lowercase_ = 32, lowercase_ = None, lowercase_ = 0.18_215, lowercase_ = "group", ) -> str:
super().__init__()
# pass init params to Encoder
snake_case = Encoder(
in_channels=lowercase_, out_channels=lowercase_, down_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, double_z=lowercase_, )
snake_case = vq_embed_dim if vq_embed_dim is not None else latent_channels
snake_case = nn.Convad(lowercase_, lowercase_, 1 )
snake_case = VectorQuantizer(lowercase_, lowercase_, beta=0.25, remap=lowercase_, sane_index_shape=lowercase_ )
snake_case = nn.Convad(lowercase_, lowercase_, 1 )
# pass init params to Decoder
snake_case = Decoder(
in_channels=lowercase_, out_channels=lowercase_, up_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, norm_type=lowercase_, )
@apply_forward_hook
def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> VQEncoderOutput:
snake_case = self.encoder(lowercase_ )
snake_case = self.quant_conv(lowercase_ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowercase_ )
@apply_forward_hook
def _lowerCamelCase ( self, lowercase_, lowercase_ = False, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
# also go through quantization layer
if not force_not_quantize:
snake_case , snake_case , snake_case = self.quantize(lowercase_ )
else:
snake_case = h
snake_case = self.post_quant_conv(lowercase_ )
snake_case = self.decoder(lowercase_, quant if self.config.norm_type == 'spatial' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
snake_case = sample
snake_case = self.encode(lowercase_ ).latents
snake_case = self.decode(lowercase_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase_ )
| 332 | 0 |
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class lowerCamelCase ( tf.keras.layers.Layer ):
def __init__( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_=1, lowercase_=False, **lowercase_ ) -> Dict:
super().__init__(**snake_case_ )
snake_case = vocab_size
snake_case = d_embed
snake_case = d_proj
snake_case = cutoffs + [vocab_size]
snake_case = [0] + self.cutoffs
snake_case = div_val
snake_case = self.cutoffs[0]
snake_case = len(self.cutoffs ) - 1
snake_case = self.shortlist_size + self.n_clusters
snake_case = keep_order
snake_case = []
snake_case = []
def _lowerCamelCase ( self, lowercase_ ) -> str:
if self.n_clusters > 0:
snake_case = self.add_weight(
shape=(self.n_clusters, self.d_embed), initializer='zeros', trainable=snake_case_, name='cluster_weight' )
snake_case = self.add_weight(
shape=(self.n_clusters,), initializer='zeros', trainable=snake_case_, name='cluster_bias' )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
snake_case = self.add_weight(
shape=(self.d_embed, self.d_proj), initializer='zeros', trainable=snake_case_, name=F'''out_projs_._{i}''', )
self.out_projs.append(snake_case_ )
else:
self.out_projs.append(snake_case_ )
snake_case = self.add_weight(
shape=(self.vocab_size, self.d_embed), initializer='zeros', trainable=snake_case_, name=F'''out_layers_._{i}_._weight''', )
snake_case = self.add_weight(
shape=(self.vocab_size,), initializer='zeros', trainable=snake_case_, name=F'''out_layers_._{i}_._bias''', )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
snake_case = self.cutoff_ends[i], self.cutoff_ends[i + 1]
snake_case = self.d_embed // (self.div_val**i)
snake_case = self.add_weight(
shape=(d_emb_i, self.d_proj), initializer='zeros', trainable=snake_case_, name=F'''out_projs_._{i}''' )
self.out_projs.append(snake_case_ )
snake_case = self.add_weight(
shape=(r_idx - l_idx, d_emb_i), initializer='zeros', trainable=snake_case_, name=F'''out_layers_._{i}_._weight''', )
snake_case = self.add_weight(
shape=(r_idx - l_idx,), initializer='zeros', trainable=snake_case_, name=F'''out_layers_._{i}_._bias''', )
self.out_layers.append((weight, bias) )
super().build(snake_case_ )
@staticmethod
def _lowerCamelCase ( lowercase_, lowercase_, lowercase_, lowercase_=None ) -> Dict:
snake_case = x
if proj is not None:
snake_case = tf.einsum('ibd,ed->ibe', snake_case_, snake_case_ )
return tf.einsum('ibd,nd->ibn', snake_case_, snake_case_ ) + b
@staticmethod
def _lowerCamelCase ( lowercase_, lowercase_ ) -> str:
snake_case = shape_list(snake_case_ )
snake_case = tf.range(lp_size[0], dtype=target.dtype )
snake_case = tf.stack([r, target], 1 )
return tf.gather_nd(snake_case_, snake_case_ )
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_=True, lowercase_=False ) -> int:
snake_case = 0
if self.n_clusters == 0:
snake_case = self._logit(snake_case_, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0] )
if target is not None:
snake_case = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=snake_case_, logits=snake_case_ )
snake_case = tf.nn.log_softmax(snake_case_, axis=-1 )
else:
snake_case = shape_list(snake_case_ )
snake_case = []
snake_case = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
snake_case = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
snake_case = (target >= l_idx) & (target < r_idx)
snake_case = tf.where(snake_case_ )
snake_case = tf.boolean_mask(snake_case_, snake_case_ ) - l_idx
if self.div_val == 1:
snake_case = self.out_layers[0][0][l_idx:r_idx]
snake_case = self.out_layers[0][1][l_idx:r_idx]
else:
snake_case = self.out_layers[i][0]
snake_case = self.out_layers[i][1]
if i == 0:
snake_case = tf.concat([cur_W, self.cluster_weight], 0 )
snake_case = tf.concat([cur_b, self.cluster_bias], 0 )
snake_case = self._logit(snake_case_, snake_case_, snake_case_, self.out_projs[0] )
snake_case = tf.nn.log_softmax(snake_case_ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
snake_case = tf.boolean_mask(snake_case_, snake_case_ )
snake_case = self._gather_logprob(snake_case_, snake_case_ )
else:
snake_case = self._logit(snake_case_, snake_case_, snake_case_, self.out_projs[i] )
snake_case = tf.nn.log_softmax(snake_case_ )
snake_case = self.cutoffs[0] + i - 1 # No probability for the head cluster
snake_case = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(snake_case_ )
if target is not None:
snake_case = tf.boolean_mask(snake_case_, snake_case_ )
snake_case = tf.boolean_mask(snake_case_, snake_case_ )
snake_case = self._gather_logprob(snake_case_, snake_case_ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(snake_case_, -cur_logprob, shape_list(snake_case_ ) )
snake_case = tf.concat(snake_case_, axis=-1 )
if target is not None:
if return_mean:
snake_case = tf.reduce_mean(snake_case_ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(snake_case_ )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(snake_case_, name=self.name, aggregation='mean' if return_mean else '' )
return out
| 352 |
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def __magic_name__ ( A = 2_0_0_0_0_0_0 ) -> int:
snake_case = [0]
snake_case = 42
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
snake_case = 0
# the area corresponding to the grid that gives the product closest to target
snake_case = 0
# an estimate of b, using the quadratic formula
snake_case = 42
# the largest integer less than b_estimate
snake_case = 42
# the largest integer less than b_estimate
snake_case = 42
# the triangle number corresponding to b_floor
snake_case = 42
# the triangle number corresponding to b_ceil
snake_case = 42
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
snake_case = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
snake_case = floor(A )
snake_case = ceil(A )
snake_case = triangle_numbers[b_floor]
snake_case = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
snake_case = triangle_b_first_guess * triangle_a
snake_case = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
snake_case = triangle_b_second_guess * triangle_a
snake_case = idx_a * b_ceil
return area
if __name__ == "__main__":
print(f"{solution() = }")
| 332 | 0 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __magic_name__ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> str:
with open(UpperCamelCase_ ) as metadata_file:
snake_case = json.load(UpperCamelCase_ )
snake_case = LukeConfig(use_entity_aware_attention=UpperCamelCase_ , **metadata['model_config'] )
# Load in the weights from the checkpoint_path
snake_case = torch.load(UpperCamelCase_ , map_location='cpu' )['module']
# Load the entity vocab file
snake_case = load_original_entity_vocab(UpperCamelCase_ )
# add an entry for [MASK2]
snake_case = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
snake_case = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
# Add special tokens to the token vocabulary for downstream tasks
snake_case = AddedToken('<ent>' , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ )
snake_case = AddedToken('<ent2>' , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ )
tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , 'r' ) as f:
snake_case = json.load(UpperCamelCase_ )
snake_case = 'MLukeTokenizer'
with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , 'w' ) as f:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , MLukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
snake_case = MLukeTokenizer.from_pretrained(UpperCamelCase_ )
# Initialize the embeddings of the special tokens
snake_case = tokenizer.convert_tokens_to_ids(['@'] )[0]
snake_case = tokenizer.convert_tokens_to_ids(['#'] )[0]
snake_case = state_dict['embeddings.word_embeddings.weight']
snake_case = word_emb[ent_init_index].unsqueeze(0 )
snake_case = word_emb[enta_init_index].unsqueeze(0 )
snake_case = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
snake_case = state_dict[bias_name]
snake_case = decoder_bias[ent_init_index].unsqueeze(0 )
snake_case = decoder_bias[enta_init_index].unsqueeze(0 )
snake_case = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
snake_case = F'''encoder.layer.{layer_index}.attention.self.'''
snake_case = state_dict[prefix + matrix_name]
snake_case = state_dict[prefix + matrix_name]
snake_case = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
snake_case = state_dict['entity_embeddings.entity_embeddings.weight']
snake_case = entity_emb[entity_vocab['[MASK]']].unsqueeze(0 )
snake_case = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
snake_case = state_dict['entity_predictions.bias']
snake_case = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0 )
snake_case = torch.cat([entity_prediction_bias, entity_mask_bias] )
snake_case = LukeForMaskedLM(config=UpperCamelCase_ ).eval()
state_dict.pop('entity_predictions.decoder.weight' )
state_dict.pop('lm_head.decoder.weight' )
state_dict.pop('lm_head.decoder.bias' )
snake_case = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('lm_head' ) or key.startswith('entity_predictions' )):
snake_case = state_dict[key]
else:
snake_case = state_dict[key]
snake_case , snake_case = model.load_state_dict(UpperCamelCase_ , strict=UpperCamelCase_ )
if set(UpperCamelCase_ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(UpperCamelCase_ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
snake_case = MLukeTokenizer.from_pretrained(UpperCamelCase_ , task='entity_classification' )
snake_case = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
snake_case = (0, 9)
snake_case = tokenizer(UpperCamelCase_ , entity_spans=[span] , return_tensors='pt' )
snake_case = model(**UpperCamelCase_ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
snake_case = torch.Size((1, 3_3, 7_6_8) )
snake_case = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase_ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
snake_case = torch.Size((1, 1, 7_6_8) )
snake_case = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , UpperCamelCase_ , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
snake_case = MLukeTokenizer.from_pretrained(UpperCamelCase_ )
snake_case = 'Tokyo is the capital of <mask>.'
snake_case = (2_4, 3_0)
snake_case = tokenizer(UpperCamelCase_ , entity_spans=[span] , return_tensors='pt' )
snake_case = model(**UpperCamelCase_ )
snake_case = encoding['input_ids'][0].tolist()
snake_case = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) )
snake_case = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(UpperCamelCase_ )
snake_case = outputs.entity_logits[0][0].argmax().item()
snake_case = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(UpperCamelCase_ ) )
model.save_pretrained(UpperCamelCase_ )
def __magic_name__ ( UpperCamelCase_ ) -> int:
snake_case = ['[MASK]', '[PAD]', '[UNK]']
snake_case = [json.loads(UpperCamelCase_ ) for line in open(UpperCamelCase_ )]
snake_case = {}
for entry in data:
snake_case = entry['id']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
snake_case = entity_id
break
snake_case = F'''{language}:{entity_name}'''
snake_case = entity_id
return new_mapping
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
lowerCAmelCase_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 353 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 332 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
lowerCAmelCase_ = False
class lowerCamelCase ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> List[Any]:
snake_case = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion' )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
snake_case = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
snake_case = torch.manual_seed(0 )
snake_case = pipe(
image=a__, generator=a__, guidance_scale=7.5, num_inference_steps=50, output_type='numpy', ).images
snake_case = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
snake_case = np.array([0.0_441, 0.0_469, 0.0_507, 0.0_575, 0.0_632, 0.0_650, 0.0_865, 0.0_909, 0.0_945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 354 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
lowerCAmelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCamelCase :
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(__lowerCAmelCase )} )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} )
snake_case_ = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
snake_case_ = field(
default=128 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , )
snake_case_ = field(
default=64 , metadata={
'''help''': (
'''The maximum number of tokens for the question. Questions longer than this will '''
'''be truncated to this length.'''
)
} , )
snake_case_ = field(
default=30 , metadata={
'''help''': (
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
)
} , )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} )
snake_case_ = field(
default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
snake_case_ = field(
default=20 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
snake_case_ = field(
default=0 , metadata={
'''help''': (
'''language id of input for language-specific xlm models (see'''
''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'''
)
} , )
snake_case_ = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} )
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''train'''
snake_case_ = '''dev'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
def __init__( self, lowercase_, lowercase_, lowercase_ = None, lowercase_ = Split.train, lowercase_ = False, lowercase_ = None, lowercase_ = "pt", ) -> int:
snake_case = args
snake_case = is_language_sensitive
snake_case = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(lowercase_, lowercase_ ):
try:
snake_case = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
snake_case = mode
# Load data features from cache or dataset file
snake_case = 'v2' if args.version_2_with_negative else 'v1'
snake_case = os.path.join(
cache_dir if cache_dir is not None else args.data_dir, F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
snake_case = cached_features_file + '.lock'
with FileLock(lowercase_ ):
if os.path.exists(lowercase_ ) and not args.overwrite_cache:
snake_case = time.time()
snake_case = torch.load(lowercase_ )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
snake_case = self.old_features['features']
snake_case = self.old_features.get('dataset', lowercase_ )
snake_case = self.old_features.get('examples', lowercase_ )
logger.info(
F'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
' future run' )
else:
if mode == Split.dev:
snake_case = self.processor.get_dev_examples(args.data_dir )
else:
snake_case = self.processor.get_train_examples(args.data_dir )
snake_case , snake_case = squad_convert_examples_to_features(
examples=self.examples, tokenizer=lowercase_, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowercase_, )
snake_case = time.time()
torch.save(
{'features': self.features, 'dataset': self.dataset, 'examples': self.examples}, lowercase_, )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ) -> Tuple:
return len(self.features )
def __getitem__( self, lowercase_ ) -> Dict[str, torch.Tensor]:
# Convert to Tensors and build dataset
snake_case = self.features[i]
snake_case = torch.tensor(feature.input_ids, dtype=torch.long )
snake_case = torch.tensor(feature.attention_mask, dtype=torch.long )
snake_case = torch.tensor(feature.token_type_ids, dtype=torch.long )
snake_case = torch.tensor(feature.cls_index, dtype=torch.long )
snake_case = torch.tensor(feature.p_mask, dtype=torch.float )
snake_case = torch.tensor(feature.is_impossible, dtype=torch.float )
snake_case = {
'input_ids': input_ids,
'attention_mask': attention_mask,
'token_type_ids': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'cls_index': cls_index, 'p_mask': p_mask} )
if self.args.version_2_with_negative:
inputs.update({'is_impossible': is_impossible} )
if self.is_language_sensitive:
inputs.update({'langs': (torch.ones(input_ids.shape, dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
snake_case = torch.tensor(feature.start_position, dtype=torch.long )
snake_case = torch.tensor(feature.end_position, dtype=torch.long )
inputs.update({'start_positions': start_positions, 'end_positions': end_positions} )
return inputs
| 332 | 0 |
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class lowerCamelCase ( nn.Module ):
def __init__( self ) -> str:
super().__init__()
snake_case = nn.Linear(3, 4 )
snake_case = nn.BatchNormad(4 )
snake_case = nn.Linear(4, 5 )
def _lowerCamelCase ( self, lowercase_ ) -> str:
return self.lineara(self.batchnorm(self.lineara(UpperCamelCase_ ) ) )
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> int:
snake_case = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(UpperCamelCase_, model.state_dict() )
snake_case = os.path.join(UpperCamelCase_, 'index.json' )
self.assertTrue(os.path.isfile(UpperCamelCase_ ) )
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
snake_case = os.path.join(UpperCamelCase_, F'''{key}.dat''' )
self.assertTrue(os.path.isfile(UpperCamelCase_ ) )
# TODO: add tests on the fact weights are properly loaded
def _lowerCamelCase ( self ) -> List[str]:
snake_case = [torch.floataa, torch.floataa, torch.bfloataa]
for dtype in dtypes:
snake_case = torch.randn(2, 3, dtype=UpperCamelCase_ )
with TemporaryDirectory() as tmp_dir:
snake_case = offload_weight(UpperCamelCase_, 'weight', UpperCamelCase_, {} )
snake_case = os.path.join(UpperCamelCase_, 'weight.dat' )
self.assertTrue(os.path.isfile(UpperCamelCase_ ) )
self.assertDictEqual(UpperCamelCase_, {'weight': {'shape': [2, 3], 'dtype': str(UpperCamelCase_ ).split('.' )[1]}} )
snake_case = load_offloaded_weight(UpperCamelCase_, index['weight'] )
self.assertTrue(torch.equal(UpperCamelCase_, UpperCamelCase_ ) )
def _lowerCamelCase ( self ) -> Any:
snake_case = ModelForTest()
snake_case = model.state_dict()
snake_case = {k: v for k, v in state_dict.items() if 'linear2' not in k}
snake_case = {k: v for k, v in state_dict.items() if 'linear2' in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(UpperCamelCase_, UpperCamelCase_ )
snake_case = OffloadedWeightsLoader(state_dict=UpperCamelCase_, save_folder=UpperCamelCase_ )
# Every key is there with the right value
self.assertEqual(sorted(UpperCamelCase_ ), sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(UpperCamelCase_, weight_map[key] ) )
snake_case = {k: v for k, v in state_dict.items() if 'weight' in k}
snake_case = {k: v for k, v in state_dict.items() if 'weight' not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(UpperCamelCase_, UpperCamelCase_ )
snake_case = OffloadedWeightsLoader(state_dict=UpperCamelCase_, save_folder=UpperCamelCase_ )
# Every key is there with the right value
self.assertEqual(sorted(UpperCamelCase_ ), sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(UpperCamelCase_, weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(UpperCamelCase_, UpperCamelCase_ )
# Duplicates are removed
snake_case = OffloadedWeightsLoader(state_dict=UpperCamelCase_, save_folder=UpperCamelCase_ )
# Every key is there with the right value
self.assertEqual(sorted(UpperCamelCase_ ), sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(UpperCamelCase_, weight_map[key] ) )
def _lowerCamelCase ( self ) -> Union[str, Any]:
snake_case = {'a.1': 0, 'a.10': 1, 'a.2': 2}
snake_case = extract_submodules_state_dict(UpperCamelCase_, ['a.1', 'a.2'] )
self.assertDictEqual(UpperCamelCase_, {'a.1': 0, 'a.2': 2} )
snake_case = {'a.1.a': 0, 'a.10.a': 1, 'a.2.a': 2}
snake_case = extract_submodules_state_dict(UpperCamelCase_, ['a.1', 'a.2'] )
self.assertDictEqual(UpperCamelCase_, {'a.1.a': 0, 'a.2.a': 2} )
| 355 |
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def __magic_name__ ( A , A , A ) -> Any:
# Initialise PyTorch model
snake_case = BertConfig.from_json_file(A )
print(F'''Building PyTorch model from configuration: {config}''' )
snake_case = BertForPreTraining(A )
# Load weights from tf checkpoint
load_tf_weights_in_bert(A , A , A )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , A )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCAmelCase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 332 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
lowerCAmelCase_ = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __magic_name__ ( A , A , A , A , A ) -> List[Any]:
for attribute in key.split('.' ):
snake_case = getattr(lowerCamelCase_ , lowerCamelCase_ )
if weight_type is not None:
snake_case = getattr(lowerCamelCase_ , lowerCamelCase_ ).shape
else:
snake_case = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
snake_case = value
elif weight_type == "weight_g":
snake_case = value
elif weight_type == "weight_v":
snake_case = value
elif weight_type == "bias":
snake_case = value
else:
snake_case = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __magic_name__ ( A , A ) -> int:
snake_case = []
snake_case = fairseq_model.state_dict()
snake_case = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
snake_case = None
for name, value in fairseq_dict.items():
snake_case = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , hf_model.config.feat_extract_norm == 'group' , )
snake_case = True
elif name.split('.' )[0] == "proj":
snake_case = fairseq_model.proj
snake_case = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
snake_case = True
if "*" in mapped_key:
snake_case = name.split(lowerCamelCase_ )[0].split('.' )[-2]
snake_case = mapped_key.replace('*' , lowerCamelCase_ )
if "weight_g" in name:
snake_case = '''weight_g'''
elif "weight_v" in name:
snake_case = '''weight_v'''
elif "bias" in name:
snake_case = '''bias'''
elif "weight" in name:
snake_case = '''weight'''
else:
snake_case = None
set_recursively(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
continue
if not is_used:
unused_weights.append(lowerCamelCase_ )
logger.warning(F'''Unused weights: {unused_weights}''' )
return proj_weight
def __magic_name__ ( A , A , A , A , A ) -> str:
snake_case = full_name.split('conv_layers.' )[-1]
snake_case = name.split('.' )
snake_case = int(items[0] )
snake_case = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
snake_case = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowerCamelCase_ )
def __magic_name__ ( A ) -> List[str]:
snake_case = emb.weight.shape
snake_case = nn.Linear(lowerCamelCase_ , lowerCamelCase_ , bias=lowerCamelCase_ )
snake_case = emb.weight.data
return lin_layer
def __magic_name__ ( A ) -> List[str]:
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
snake_case = f.readlines()
snake_case = [line.split(' ' )[0] for line in lines]
snake_case = len(lowerCamelCase_ )
snake_case = {
'''<s>''': 0,
'''<pad>''': 1,
'''</s>''': 2,
'''<unk>''': 3,
}
vocab_dict.update(dict(zip(lowerCamelCase_ , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def __magic_name__ ( A , A , A , A , A , A , A , ) -> List[str]:
snake_case = WavaVecaConfig.from_pretrained(lowerCamelCase_ )
snake_case = SpeechaTextaConfig.from_pretrained(
lowerCamelCase_ , vocab_size=lowerCamelCase_ , decoder_layers=lowerCamelCase_ , do_stable_layer_norm=lowerCamelCase_ )
snake_case = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , )
snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
snake_case = model[0].eval()
# set weights for wav2vec2 encoder
snake_case = WavaVecaModel(lowerCamelCase_ )
snake_case = recursively_load_weights_wavaveca(model.encoder , lowerCamelCase_ )
snake_case = SpeechaTextaForCausalLM(lowerCamelCase_ )
snake_case = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=lowerCamelCase_ )
# set output linear layer
unexpected_keys.remove('embed_out' )
snake_case = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
snake_case = SpeechEncoderDecoderModel(encoder=lowerCamelCase_ , decoder=lowerCamelCase_ )
snake_case = False
# add projection layer
snake_case = nn.Parameter(projection_layer.weight )
snake_case = nn.Parameter(projection_layer.bias )
snake_case = create_vocab_dict(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_ , 'vocab.json' ) , 'w' ) as fp:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
snake_case = SpeechaTextaTokenizer(os.path.join(lowerCamelCase_ , 'vocab.json' ) )
tokenizer.save_pretrained(lowerCamelCase_ )
snake_case = hf_wavavec.config.to_dict()
snake_case = tokenizer.pad_token_id
snake_case = tokenizer.bos_token_id
snake_case = tokenizer.eos_token_id
snake_case = '''speech_to_text_2'''
snake_case = '''wav2vec2'''
snake_case = SpeechEncoderDecoderConfig.from_dict(lowerCamelCase_ )
hf_wavavec.save_pretrained(lowerCamelCase_ )
feature_extractor.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=1_0_2_2_4, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
lowerCAmelCase_ = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 356 |
'''simple docstring'''
from __future__ import annotations
def __magic_name__ ( A ) -> list:
if len(A ) == 0:
return []
snake_case , snake_case = min(A ), max(A )
snake_case = int(max_value - min_value ) + 1
snake_case = [[] for _ in range(A )]
for i in my_list:
buckets[int(i - min_value )].append(A )
return [v for bucket in buckets for v in sorted(A )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
| 332 | 0 |
'''simple docstring'''
from math import ceil, sqrt
def __magic_name__ ( A = 1_0_0_0_0_0_0 ) -> int:
snake_case = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
snake_case = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
snake_case = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f"{solution() = }")
| 357 |
'''simple docstring'''
def __magic_name__ ( A ) -> float:
return 1_0 - x * x
def __magic_name__ ( A , A ) -> float:
# Bolzano theory in order to find if there is a root between a and b
if equation(A ) * equation(A ) >= 0:
raise ValueError('Wrong space!' )
snake_case = a
while (b - a) >= 0.01:
# Find middle point
snake_case = (a + b) / 2
# Check if middle point is root
if equation(A ) == 0.0:
break
# Decide the side to repeat the steps
if equation(A ) * equation(A ) < 0:
snake_case = c
else:
snake_case = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 332 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase ( lowerCamelCase__ ):
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[int]:
warnings.warn(
'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use LayoutLMv2ImageProcessor instead.', __snake_case, )
super().__init__(*__snake_case, **__snake_case )
| 358 |
'''simple docstring'''
import pytest
lowerCAmelCase_ = "__dummy_dataset1__"
lowerCAmelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def __magic_name__ ( ) -> List[Any]:
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def __magic_name__ ( ) -> Union[str, Any]:
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def __magic_name__ ( A , A , A ) -> Optional[int]:
snake_case = dataset_loading_script_name
snake_case = tmp_path / 'datasets' / script_name
script_dir.mkdir(parents=A )
snake_case = script_dir / F'''{script_name}.py'''
with open(A , 'w' ) as f:
f.write(A )
return str(A )
| 332 | 0 |
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def __magic_name__ ( A , A , A ) -> str:
if isinstance(lowerCAmelCase__ , torch.Tensor ):
return image
elif isinstance(lowerCAmelCase__ , PIL.Image.Image ):
snake_case = [image]
if isinstance(image[0] , PIL.Image.Image ):
snake_case = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
snake_case = np.concatenate(lowerCAmelCase__ , axis=0 )
snake_case = np.array(lowerCAmelCase__ ).astype(np.floataa ) / 2_5_5.0
snake_case = image.transpose(0 , 3 , 1 , 2 )
snake_case = 2.0 * image - 1.0
snake_case = torch.from_numpy(lowerCAmelCase__ )
elif isinstance(image[0] , torch.Tensor ):
snake_case = torch.cat(lowerCAmelCase__ , dim=0 )
return image
def __magic_name__ ( A , A , A , A=0.9_995 ) -> Optional[int]:
if not isinstance(lowerCAmelCase__ , np.ndarray ):
snake_case = True
snake_case = va.device
snake_case = va.cpu().numpy()
snake_case = va.cpu().numpy()
snake_case = np.sum(va * va / (np.linalg.norm(lowerCAmelCase__ ) * np.linalg.norm(lowerCAmelCase__ )) )
if np.abs(lowerCAmelCase__ ) > DOT_THRESHOLD:
snake_case = (1 - t) * va + t * va
else:
snake_case = np.arccos(lowerCAmelCase__ )
snake_case = np.sin(lowerCAmelCase__ )
snake_case = theta_a * t
snake_case = np.sin(lowerCAmelCase__ )
snake_case = np.sin(theta_a - theta_t ) / sin_theta_a
snake_case = sin_theta_t / sin_theta_a
snake_case = sa * va + sa * va
if inputs_are_torch:
snake_case = torch.from_numpy(lowerCAmelCase__ ).to(lowerCAmelCase__ )
return va
def __magic_name__ ( A , A ) -> int:
snake_case = F.normalize(lowerCAmelCase__ , dim=-1 )
snake_case = F.normalize(lowerCAmelCase__ , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def __magic_name__ ( A , A ) -> Optional[int]:
for param in model.parameters():
snake_case = value
class lowerCamelCase ( SCREAMING_SNAKE_CASE__ ):
def __init__( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_=None, lowercase_=None, lowercase_=None, ) -> Tuple:
super().__init__()
self.register_modules(
vae=A__, text_encoder=A__, clip_model=A__, tokenizer=A__, unet=A__, scheduler=A__, feature_extractor=A__, coca_model=A__, coca_tokenizer=A__, coca_transform=A__, )
snake_case = (
feature_extractor.size
if isinstance(feature_extractor.size, A__ )
else feature_extractor.size['shortest_edge']
)
snake_case = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std )
set_requires_grad(self.text_encoder, A__ )
set_requires_grad(self.clip_model, A__ )
def _lowerCamelCase ( self, lowercase_ = "auto" ) -> List[Any]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
snake_case = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A__ )
def _lowerCamelCase ( self ) -> Union[str, Any]:
self.enable_attention_slicing(A__ )
def _lowerCamelCase ( self ) -> int:
set_requires_grad(self.vae, A__ )
def _lowerCamelCase ( self ) -> Tuple:
set_requires_grad(self.vae, A__ )
def _lowerCamelCase ( self ) -> Any:
set_requires_grad(self.unet, A__ )
def _lowerCamelCase ( self ) -> List[Any]:
set_requires_grad(self.unet, A__ )
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_ ) -> int:
# get the original timestep using init_timestep
snake_case = min(int(num_inference_steps * strength ), A__ )
snake_case = max(num_inference_steps - init_timestep, 0 )
snake_case = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_=None ) -> str:
if not isinstance(A__, torch.Tensor ):
raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(A__ )}''' )
snake_case = image.to(device=A__, dtype=A__ )
if isinstance(A__, A__ ):
snake_case = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(A__ )
]
snake_case = torch.cat(A__, dim=0 )
else:
snake_case = self.vae.encode(A__ ).latent_dist.sample(A__ )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
snake_case = 0.18_215 * init_latents
snake_case = init_latents.repeat_interleave(A__, dim=0 )
snake_case = randn_tensor(init_latents.shape, generator=A__, device=A__, dtype=A__ )
# get latents
snake_case = self.scheduler.add_noise(A__, A__, A__ )
snake_case = init_latents
return latents
def _lowerCamelCase ( self, lowercase_ ) -> Union[str, Any]:
snake_case = self.coca_transform(A__ ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
snake_case = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype ) )
snake_case = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('<end_of_text>' )[0].replace('<start_of_text>', '' ).rstrip(' .,' )
def _lowerCamelCase ( self, lowercase_, lowercase_ ) -> Tuple:
snake_case = self.feature_extractor.preprocess(A__ )
snake_case = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
snake_case = self.clip_model.get_image_features(A__ )
snake_case = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=A__ )
snake_case = image_embeddings_clip.repeat_interleave(A__, dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, ) -> Any:
snake_case = latents.detach().requires_grad_()
snake_case = self.scheduler.scale_model_input(A__, A__ )
# predict the noise residual
snake_case = self.unet(A__, A__, encoder_hidden_states=A__ ).sample
if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
snake_case = self.scheduler.alphas_cumprod[timestep]
snake_case = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
snake_case = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
snake_case = torch.sqrt(A__ )
snake_case = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler, A__ ):
snake_case = self.scheduler.sigmas[index]
snake_case = latents - sigma * noise_pred
else:
raise ValueError(F'''scheduler type {type(self.scheduler )} not supported''' )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
snake_case = 1 / 0.18_215 * sample
snake_case = self.vae.decode(A__ ).sample
snake_case = (image / 2 + 0.5).clamp(0, 1 )
snake_case = transforms.Resize(self.feature_extractor_size )(A__ )
snake_case = self.normalize(A__ ).to(latents.dtype )
snake_case = self.clip_model.get_image_features(A__ )
snake_case = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=A__ )
snake_case = spherical_dist_loss(A__, A__ ).mean() * clip_guidance_scale
snake_case = -torch.autograd.grad(A__, A__ )[0]
if isinstance(self.scheduler, A__ ):
snake_case = latents.detach() + grads * (sigma**2)
snake_case = noise_pred_original
else:
snake_case = noise_pred_original - torch.sqrt(A__ ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self, lowercase_, lowercase_, lowercase_ = None, lowercase_ = None, lowercase_ = 512, lowercase_ = 512, lowercase_ = 0.6, lowercase_ = 50, lowercase_ = 7.5, lowercase_ = 1, lowercase_ = 0.0, lowercase_ = 100, lowercase_ = None, lowercase_ = "pil", lowercase_ = True, lowercase_ = 0.8, lowercase_ = 0.1, lowercase_ = 0.1, ) -> List[Any]:
if isinstance(A__, A__ ) and len(A__ ) != batch_size:
raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(A__ )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(A__, torch.Generator ) and batch_size > 1:
snake_case = [generator] + [None] * (batch_size - 1)
snake_case = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
snake_case = [x[0] for x in coca_is_none if x[1]]
snake_case = ', '.join(A__ )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(A__ ):
raise ValueError(
F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
F'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
snake_case = self.get_image_description(A__ )
if style_prompt is None:
if len(A__ ):
raise ValueError(
F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
snake_case = self.get_image_description(A__ )
# get prompt text embeddings for content and style
snake_case = self.tokenizer(
A__, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=A__, return_tensors='pt', )
snake_case = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
snake_case = self.tokenizer(
A__, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=A__, return_tensors='pt', )
snake_case = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
snake_case = slerp(A__, A__, A__ )
# duplicate text embeddings for each generation per prompt
snake_case = text_embeddings.repeat_interleave(A__, dim=0 )
# set timesteps
snake_case = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
snake_case = {}
if accepts_offset:
snake_case = 1
self.scheduler.set_timesteps(A__, **A__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
snake_case , snake_case = self.get_timesteps(A__, A__, self.device )
snake_case = timesteps[:1].repeat(A__ )
# Preprocess image
snake_case = preprocess(A__, A__, A__ )
snake_case = self.prepare_latents(
A__, A__, A__, text_embeddings.dtype, self.device, A__ )
snake_case = preprocess(A__, A__, A__ )
snake_case = self.prepare_latents(
A__, A__, A__, text_embeddings.dtype, self.device, A__ )
snake_case = slerp(A__, A__, A__ )
if clip_guidance_scale > 0:
snake_case = self.get_clip_image_embeddings(A__, A__ )
snake_case = self.get_clip_image_embeddings(A__, A__ )
snake_case = slerp(
A__, A__, A__ )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
snake_case = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
snake_case = content_text_input.input_ids.shape[-1]
snake_case = self.tokenizer([''], padding='max_length', max_length=A__, return_tensors='pt' )
snake_case = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
snake_case = uncond_embeddings.repeat_interleave(A__, dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
snake_case = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
snake_case = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
snake_case = torch.randn(A__, generator=A__, device='cpu', dtype=A__ ).to(
self.device )
else:
snake_case = torch.randn(A__, generator=A__, device=self.device, dtype=A__ )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
snake_case = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
snake_case = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
snake_case = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
snake_case = {}
if accepts_eta:
snake_case = eta
# check if the scheduler accepts generator
snake_case = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
snake_case = generator
with self.progress_bar(total=A__ ):
for i, t in enumerate(A__ ):
# expand the latents if we are doing classifier free guidance
snake_case = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case = self.scheduler.scale_model_input(A__, A__ )
# predict the noise residual
snake_case = self.unet(A__, A__, encoder_hidden_states=A__ ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
snake_case , snake_case = noise_pred.chunk(2 )
snake_case = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
snake_case = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
snake_case , snake_case = self.cond_fn(
A__, A__, A__, A__, A__, A__, A__, )
# compute the previous noisy sample x_t -> x_t-1
snake_case = self.scheduler.step(A__, A__, A__, **A__ ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
snake_case = 1 / 0.18_215 * latents
snake_case = self.vae.decode(A__ ).sample
snake_case = (image / 2 + 0.5).clamp(0, 1 )
snake_case = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
snake_case = self.numpy_to_pil(A__ )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=A__, nsfw_content_detected=A__ )
| 359 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
lowerCAmelCase_ = Lock()
def __magic_name__ ( A , A , A , A , A , A , A ) -> Any:
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 1_0 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(A )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
snake_case = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
snake_case = min(A , A )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(A )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
snake_case = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
snake_case = max(A , A )
# after all swaps are performed, send the values back to main
result_pipe[1].send(A )
def __magic_name__ ( A ) -> str:
snake_case = []
snake_case = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
snake_case = Pipe()
snake_case = Pipe()
process_array_.append(
Process(
target=A , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
snake_case = temp_rs
snake_case = temp_rr
for i in range(1 , len(A ) - 1 ):
snake_case = Pipe()
snake_case = Pipe()
process_array_.append(
Process(
target=A , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
snake_case = temp_rs
snake_case = temp_rr
process_array_.append(
Process(
target=A , args=(
len(A ) - 1,
arr[len(A ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(A ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(A ) ):
snake_case = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def __magic_name__ ( ) -> Tuple:
snake_case = list(range(1_0 , 0 , -1 ) )
print('Initial List' )
print(*A )
snake_case = odd_even_transposition(A )
print('Sorted List\n' )
print(*A )
if __name__ == "__main__":
main()
| 332 | 0 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowerCAmelCase_ = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
lowerCAmelCase_ = {
"gpt-neox-20b": 2_0_4_8,
}
class lowerCamelCase ( __a ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["""input_ids""", """attention_mask"""]
def __init__( self, lowercase_=None, lowercase_=None, lowercase_=None, lowercase_="<|endoftext|>", lowercase_="<|endoftext|>", lowercase_="<|endoftext|>", lowercase_=False, **lowercase_, ) -> Any:
super().__init__(
a__, a__, tokenizer_file=a__, unk_token=a__, bos_token=a__, eos_token=a__, add_prefix_space=a__, **a__, )
snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space', a__ ) != add_prefix_space:
snake_case = getattr(a__, pre_tok_state.pop('type' ) )
snake_case = add_prefix_space
snake_case = pre_tok_class(**a__ )
snake_case = add_prefix_space
def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> List[str]:
snake_case = self._tokenizer.model.save(a__, name=a__ )
return tuple(a__ )
def _lowerCamelCase ( self, lowercase_ ) -> Union[str, Any]:
snake_case = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(a__, add_special_tokens=a__ ) + [self.eos_token_id] )
if len(a__ ) > self.model_max_length:
snake_case = input_ids[-self.model_max_length :]
return input_ids
| 360 |
'''simple docstring'''
from __future__ import annotations
def __magic_name__ ( A ) -> None:
create_state_space_tree(A , [] , 0 , [0 for i in range(len(A ) )] )
def __magic_name__ ( A , A , A , A , ) -> None:
if index == len(A ):
print(A )
return
for i in range(len(A ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
snake_case = True
create_state_space_tree(A , A , index + 1 , A )
current_sequence.pop()
snake_case = False
lowerCAmelCase_ = [3, 1, 2, 4]
generate_all_permutations(sequence)
lowerCAmelCase_ = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 332 | 0 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase ( lowerCamelCase__ ):
snake_case_ = '''ClapFeatureExtractor'''
snake_case_ = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self, lowercase_, lowercase_ ) -> int:
super().__init__(__A, __A )
def __call__( self, lowercase_=None, lowercase_=None, lowercase_=None, **lowercase_ ) -> Dict:
snake_case = kwargs.pop('sampling_rate', __A )
if text is None and audios is None:
raise ValueError('You have to specify either text or audios. Both cannot be none.' )
if text is not None:
snake_case = self.tokenizer(__A, return_tensors=__A, **__A )
if audios is not None:
snake_case = self.feature_extractor(
__A, sampling_rate=__A, return_tensors=__A, **__A )
if text is not None and audios is not None:
snake_case = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__A ), tensor_type=__A )
def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> List[Any]:
return self.tokenizer.batch_decode(*__A, **__A )
def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> List[Any]:
return self.tokenizer.decode(*__A, **__A )
@property
def _lowerCamelCase ( self ) -> Optional[int]:
snake_case = self.tokenizer.model_input_names
snake_case = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 361 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''roberta'''
def __init__( self, lowercase_=50265, lowercase_=768, lowercase_=12, lowercase_=12, lowercase_=3072, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=2, lowercase_=0.02, lowercase_=1E-12, lowercase_=1, lowercase_=0, lowercase_=2, lowercase_="absolute", lowercase_=True, lowercase_=None, **lowercase_, ) -> Tuple:
super().__init__(pad_token_id=lowercase_, bos_token_id=lowercase_, eos_token_id=lowercase_, **lowercase_ )
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = hidden_act
snake_case = intermediate_size
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = type_vocab_size
snake_case = initializer_range
snake_case = layer_norm_eps
snake_case = position_embedding_type
snake_case = use_cache
snake_case = classifier_dropout
class lowerCamelCase ( __lowerCAmelCase ):
@property
def _lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
snake_case = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 332 | 0 |
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''char'''
snake_case_ = '''bpe'''
snake_case_ = '''wp'''
lowerCAmelCase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = ['''image_processor''', '''char_tokenizer''']
snake_case_ = '''ViTImageProcessor'''
snake_case_ = '''MgpstrTokenizer'''
def __init__( self, lowercase_=None, lowercase_=None, **lowercase_ ) -> Optional[int]:
snake_case = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.', __a, )
snake_case = kwargs.pop('feature_extractor' )
snake_case = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
snake_case = tokenizer
snake_case = AutoTokenizer.from_pretrained('gpt2' )
snake_case = AutoTokenizer.from_pretrained('bert-base-uncased' )
super().__init__(__a, __a )
def __call__( self, lowercase_=None, lowercase_=None, lowercase_=None, **lowercase_ ) -> int:
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
snake_case = self.image_processor(__a, return_tensors=__a, **__a )
if text is not None:
snake_case = self.char_tokenizer(__a, return_tensors=__a, **__a )
if text is None:
return inputs
elif images is None:
return encodings
else:
snake_case = encodings['input_ids']
return inputs
def _lowerCamelCase ( self, lowercase_ ) -> Union[str, Any]:
snake_case , snake_case , snake_case = sequences
snake_case = char_preds.size(0 )
snake_case , snake_case = self._decode_helper(__a, 'char' )
snake_case , snake_case = self._decode_helper(__a, 'bpe' )
snake_case , snake_case = self._decode_helper(__a, 'wp' )
snake_case = []
snake_case = []
for i in range(__a ):
snake_case = [char_scores[i], bpe_scores[i], wp_scores[i]]
snake_case = [char_strs[i], bpe_strs[i], wp_strs[i]]
snake_case = scores.index(max(__a ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
snake_case = {}
snake_case = final_strs
snake_case = final_scores
snake_case = char_strs
snake_case = bpe_strs
snake_case = wp_strs
return out
def _lowerCamelCase ( self, lowercase_, lowercase_ ) -> Union[str, Any]:
if format == DecodeType.CHARACTER:
snake_case = self.char_decode
snake_case = 1
snake_case = '[s]'
elif format == DecodeType.BPE:
snake_case = self.bpe_decode
snake_case = 2
snake_case = '#'
elif format == DecodeType.WORDPIECE:
snake_case = self.wp_decode
snake_case = 102
snake_case = '[SEP]'
else:
raise ValueError(F'''Format {format} is not supported.''' )
snake_case , snake_case = [], []
snake_case = pred_logits.size(0 )
snake_case = pred_logits.size(1 )
snake_case , snake_case = pred_logits.topk(1, dim=-1, largest=__a, sorted=__a )
snake_case = preds_index.view(-1, __a )[:, 1:]
snake_case = decoder(__a )
snake_case , snake_case = torch.nn.functional.softmax(__a, dim=2 ).max(dim=2 )
snake_case = preds_max_prob[:, 1:]
for index in range(__a ):
snake_case = preds_str[index].find(__a )
snake_case = preds_str[index][:pred_eos]
snake_case = preds_index[index].cpu().tolist()
snake_case = pred_index.index(__a ) if eos_token in pred_index else -1
snake_case = preds_max_prob[index][: pred_eos_index + 1]
snake_case = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(__a )
conf_scores.append(__a )
return dec_strs, conf_scores
def _lowerCamelCase ( self, lowercase_ ) -> Tuple:
snake_case = [seq.replace(' ', '' ) for seq in self.char_tokenizer.batch_decode(__a )]
return decode_strs
def _lowerCamelCase ( self, lowercase_ ) -> int:
return self.bpe_tokenizer.batch_decode(__a )
def _lowerCamelCase ( self, lowercase_ ) -> Optional[int]:
snake_case = [seq.replace(' ', '' ) for seq in self.wp_tokenizer.batch_decode(__a )]
return decode_strs
| 362 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowerCAmelCase_ = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
lowerCAmelCase_ = {
"allenai/led-base-16384": 1_6_3_8_4,
}
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = LEDTokenizer
snake_case_ = ['''input_ids''', '''attention_mask''']
def __init__( self, lowercase_=None, lowercase_=None, lowercase_=None, lowercase_="replace", lowercase_="<s>", lowercase_="</s>", lowercase_="</s>", lowercase_="<s>", lowercase_="<unk>", lowercase_="<pad>", lowercase_="<mask>", lowercase_=False, lowercase_=True, **lowercase_, ) -> int:
super().__init__(
lowercase_, lowercase_, tokenizer_file=lowercase_, errors=lowercase_, bos_token=lowercase_, eos_token=lowercase_, sep_token=lowercase_, cls_token=lowercase_, unk_token=lowercase_, pad_token=lowercase_, mask_token=lowercase_, add_prefix_space=lowercase_, trim_offsets=lowercase_, **lowercase_, )
snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space', lowercase_ ) != add_prefix_space:
snake_case = getattr(lowercase_, pre_tok_state.pop('type' ) )
snake_case = add_prefix_space
snake_case = pre_tok_class(**lowercase_ )
snake_case = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
snake_case = 'post_processor'
snake_case = getattr(self.backend_tokenizer, lowercase_, lowercase_ )
if tokenizer_component_instance:
snake_case = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
snake_case = tuple(state['sep'] )
if "cls" in state:
snake_case = tuple(state['cls'] )
snake_case = False
if state.get('add_prefix_space', lowercase_ ) != add_prefix_space:
snake_case = add_prefix_space
snake_case = True
if state.get('trim_offsets', lowercase_ ) != trim_offsets:
snake_case = trim_offsets
snake_case = True
if changes_to_apply:
snake_case = getattr(lowercase_, state.pop('type' ) )
snake_case = component_class(**lowercase_ )
setattr(self.backend_tokenizer, lowercase_, lowercase_ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def _lowerCamelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def _lowerCamelCase ( self, lowercase_ ) -> Any:
snake_case = AddedToken(lowercase_, lstrip=lowercase_, rstrip=lowercase_ ) if isinstance(lowercase_, lowercase_ ) else value
snake_case = value
def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding:
snake_case = kwargs.get('is_split_into_words', lowercase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*lowercase_, **lowercase_ )
def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding:
snake_case = kwargs.get('is_split_into_words', lowercase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._encode_plus(*lowercase_, **lowercase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> Tuple[str]:
snake_case = self._tokenizer.model.save(lowercase_, name=lowercase_ )
return tuple(lowercase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_=None ) -> Dict:
snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> List[int]:
snake_case = [self.sep_token_id]
snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCamelCase ( self, lowercase_, lowercase_ = None, lowercase_ = PaddingStrategy.DO_NOT_PAD, lowercase_ = None, lowercase_ = None, ) -> dict:
snake_case = super()._pad(
encoded_inputs=lowercase_, max_length=lowercase_, padding_strategy=lowercase_, pad_to_multiple_of=lowercase_, return_attention_mask=lowercase_, )
# Load from model defaults
if return_attention_mask is None:
snake_case = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
snake_case = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
snake_case = len(encoded_inputs['global_attention_mask'] ) != len(lowercase_ )
if needs_to_be_padded:
snake_case = len(lowercase_ ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
snake_case = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
snake_case = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
| 332 | 0 |
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class lowerCamelCase :
def __init__( self, lowercase_, lowercase_=14, lowercase_=7, lowercase_=True, lowercase_=True, lowercase_=True, lowercase_=True, lowercase_=True, lowercase_=99, lowercase_=32, lowercase_=5, lowercase_=4, lowercase_=37, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=16, lowercase_=2, lowercase_=0.02, lowercase_=3, lowercase_=4, lowercase_=None, ) -> List[Any]:
snake_case = parent
snake_case = batch_size
snake_case = seq_length
snake_case = is_training
snake_case = use_token_type_ids
snake_case = use_input_mask
snake_case = use_labels
snake_case = use_mc_token_ids
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = intermediate_size
snake_case = hidden_act
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = type_vocab_size
snake_case = type_sequence_label_size
snake_case = initializer_range
snake_case = num_labels
snake_case = num_choices
snake_case = scope
snake_case = self.vocab_size - 1
def _lowerCamelCase ( self ) -> List[Any]:
snake_case = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
snake_case = None
if self.use_input_mask:
snake_case = random_attention_mask([self.batch_size, self.seq_length] )
snake_case = None
if self.use_token_type_ids:
snake_case = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
snake_case = None
if self.use_mc_token_ids:
snake_case = ids_tensor([self.batch_size, self.num_choices], self.seq_length )
snake_case = None
snake_case = None
snake_case = None
if self.use_labels:
snake_case = ids_tensor([self.batch_size], self.type_sequence_label_size )
snake_case = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
snake_case = ids_tensor([self.batch_size], self.num_choices )
snake_case = self.get_config()
snake_case = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _lowerCamelCase ( self ) -> Union[str, Any]:
return CTRLConfig(
vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, *lowercase_ ) -> Tuple:
snake_case = CTRLModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
model(UpperCamelCase__, token_type_ids=UpperCamelCase__, head_mask=UpperCamelCase__ )
model(UpperCamelCase__, token_type_ids=UpperCamelCase__ )
snake_case = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ), config.n_layer )
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, *lowercase_ ) -> Dict:
snake_case = CTRLLMHeadModel(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
snake_case = model(UpperCamelCase__, token_type_ids=UpperCamelCase__, labels=UpperCamelCase__ )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self ) -> Dict:
snake_case = self.prepare_config_and_inputs()
(
(
snake_case
) , (
snake_case
) , (
snake_case
) , (
snake_case
) , (
snake_case
) , (
snake_case
) , (
snake_case
) , (
snake_case
) , (
snake_case
) ,
) = config_and_inputs
snake_case = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
return config, inputs_dict
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, *lowercase_ ) -> Tuple:
snake_case = self.num_labels
snake_case = CTRLForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
snake_case = ids_tensor([self.batch_size], self.type_sequence_label_size )
snake_case = model(UpperCamelCase__, token_type_ids=UpperCamelCase__, labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
@require_torch
class lowerCamelCase ( _snake_case , _snake_case , _snake_case , unittest.TestCase ):
snake_case_ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
snake_case_ = (CTRLLMHeadModel,) if is_torch_available() else ()
snake_case_ = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = True
snake_case_ = False
snake_case_ = False
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_ ) -> List[str]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def _lowerCamelCase ( self ) -> Dict:
snake_case = CTRLModelTester(self )
snake_case = ConfigTester(self, config_class=UpperCamelCase__, n_embd=37 )
def _lowerCamelCase ( self ) -> Optional[Any]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ) -> Dict:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*UpperCamelCase__ )
def _lowerCamelCase ( self ) -> Union[str, Any]:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*UpperCamelCase__ )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _lowerCamelCase ( self ) -> Union[str, Any]:
pass
@slow
def _lowerCamelCase ( self ) -> int:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case = CTRLModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def _lowerCamelCase ( self ) -> List[Any]:
pass
@require_torch
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> Any:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def _lowerCamelCase ( self ) -> Union[str, Any]:
snake_case = CTRLLMHeadModel.from_pretrained('ctrl' )
model.to(UpperCamelCase__ )
snake_case = torch.tensor(
[[11859, 0, 1611, 8]], dtype=torch.long, device=UpperCamelCase__ ) # Legal the president is
snake_case = [
11859,
0,
1611,
8,
5,
150,
26449,
2,
19,
348,
469,
3,
2595,
48,
20740,
246533,
246533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
snake_case = model.generate(UpperCamelCase__, do_sample=UpperCamelCase__ )
self.assertListEqual(output_ids[0].tolist(), UpperCamelCase__ )
| 363 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __magic_name__ ( A ) -> Tuple:
snake_case = []
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
F'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
F'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
F'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
F'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def __magic_name__ ( A , A ) -> Optional[int]:
snake_case = []
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def __magic_name__ ( A ) -> List[Any]:
snake_case = []
token.append((F'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') )
return token
def __magic_name__ ( ) -> Dict:
snake_case = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def __magic_name__ ( A , A , A , A ) -> int:
snake_case = 'imagenet-1k-id2label.json'
snake_case = 1_0_0_0
snake_case = 'huggingface/label-files'
snake_case = num_labels
snake_case = json.load(open(cached_download(hf_hub_url(A , A , repo_type='dataset' ) ) , 'r' ) )
snake_case = {int(A ): v for k, v in idalabel.items()}
snake_case = idalabel
snake_case = {v: k for k, v in idalabel.items()}
snake_case = snake_case = CvtConfig(num_labels=A , idalabel=A , labelaid=A )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13":
snake_case = [1, 2, 1_0]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21":
snake_case = [1, 4, 1_6]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
else:
snake_case = [2, 2, 2_0]
snake_case = [3, 1_2, 1_6]
snake_case = [1_9_2, 7_6_8, 1_0_2_4]
snake_case = CvtForImageClassification(A )
snake_case = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
snake_case = image_size
snake_case = torch.load(A , map_location=torch.device('cpu' ) )
snake_case = OrderedDict()
snake_case = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
snake_case = list_of_state_dict + cls_token(A )
snake_case = list_of_state_dict + embeddings(A )
for cnt in range(config.depth[idx] ):
snake_case = list_of_state_dict + attention(A , A )
snake_case = list_of_state_dict + final()
for gg in list_of_state_dict:
print(A )
for i in range(len(A ) ):
snake_case = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(A )
model.save_pretrained(A )
image_processor.save_pretrained(A )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=3_8_4,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
lowerCAmelCase_ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 332 | 0 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
lowerCAmelCase_ = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"
lowerCAmelCase_ = "\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"
lowerCAmelCase_ = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
def _lowerCamelCase ( self ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': {'id': datasets.Value('string' ), 'prediction_text': datasets.Value('string' )},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ), codebase_urls=['https://rajpurkar.github.io/SQuAD-explorer/'], reference_urls=['https://rajpurkar.github.io/SQuAD-explorer/'], )
def _lowerCamelCase ( self, lowercase_, lowercase_ ) -> int:
snake_case = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
snake_case = [
{
'paragraphs': [
{
'qas': [
{
'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
'id': ref['id'],
}
for ref in references
]
}
]
}
]
snake_case = evaluate(dataset=_A, predictions=_A )
return score
| 364 |
'''simple docstring'''
from pathlib import Path
import fire
def __magic_name__ ( A , A , A ) -> Union[str, Any]:
snake_case = Path(A )
snake_case = Path(A )
dest_dir.mkdir(exist_ok=A )
for path in src_dir.iterdir():
snake_case = [x.rstrip() for x in list(path.open().readlines() )][:n]
snake_case = dest_dir.joinpath(path.name )
print(A )
dest_path.open('w' ).write('\n'.join(A ) )
if __name__ == "__main__":
fire.Fire(minify)
| 332 | 0 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCAmelCase_ = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def __magic_name__ ( A ) -> List[str]:
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(snake_case__ )
def __magic_name__ ( A ) -> Dict:
from transformers.testing_utils import pytest_terminal_summary_main
snake_case = terminalreporter.config.getoption('--make-reports' )
if make_reports:
pytest_terminal_summary_main(snake_case__ , id=snake_case__ )
| 365 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
lowerCAmelCase_ = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def __magic_name__ ( A , A ) -> Union[str, Any]:
inspect_dataset(A , A )
snake_case = path + '.py'
assert script_name in os.listdir(A )
assert "__pycache__" not in os.listdir(A )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def __magic_name__ ( A , A ) -> int:
inspect_metric(A , A )
snake_case = path + '.py'
assert script_name in os.listdir(A )
assert "__pycache__" not in os.listdir(A )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def __magic_name__ ( A , A , A ) -> List[str]:
snake_case = get_dataset_config_info(A , config_name=A )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def __magic_name__ ( A , A , A ) -> Any:
with pytest.raises(A ):
get_dataset_config_info(A , config_name=A )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def __magic_name__ ( A , A ) -> Dict:
snake_case = get_dataset_config_names(A )
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def __magic_name__ ( A , A , A ) -> List[str]:
snake_case = get_dataset_infos(A )
assert list(infos.keys() ) == expected_configs
snake_case = expected_configs[0]
assert expected_config in infos
snake_case = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def __magic_name__ ( A , A , A ) -> Any:
snake_case = get_dataset_infos(A )
assert expected_config in infos
snake_case = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def __magic_name__ ( A , A , A ) -> int:
with pytest.raises(A ):
get_dataset_split_names(A , config_name=A )
| 332 | 0 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
lowerCAmelCase_ = get_tests_dir("fixtures")
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> str:
# A mock response for an HTTP head request to emulate server down
snake_case = mock.Mock()
snake_case = 500
snake_case = {}
snake_case = HTTPError
snake_case = {}
# Download this model to make sure it's in the cache.
snake_case = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=lowercase_ ) as mock_head:
snake_case = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' )
# This check we did call the fake head request
mock_head.assert_called()
def _lowerCamelCase ( self ) -> Optional[int]:
# This test is for deprecated behavior and can be removed in v5
snake_case = ViTImageProcessor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json' )
def _lowerCamelCase ( self ) -> Dict:
with self.assertRaises(lowercase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
snake_case = AutoImageProcessor.from_pretrained('hf-internal-testing/stable-diffusion-all-variants' )
snake_case = AutoImageProcessor.from_pretrained(
'hf-internal-testing/stable-diffusion-all-variants', subfolder='feature_extractor' )
self.assertIsNotNone(lowercase_ )
@is_staging_test
class lowerCamelCase ( unittest.TestCase ):
@classmethod
def _lowerCamelCase ( cls ) -> Dict:
snake_case = TOKEN
HfFolder.save_token(lowercase_ )
@classmethod
def _lowerCamelCase ( cls ) -> List[Any]:
try:
delete_repo(token=cls._token, repo_id='test-image-processor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-image-processor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-image-processor' )
except HTTPError:
pass
def _lowerCamelCase ( self ) -> str:
snake_case = ViTImageProcessor.from_pretrained(lowercase_ )
image_processor.push_to_hub('test-image-processor', use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(lowercase_, getattr(lowercase_, lowercase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-image-processor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
lowercase_, repo_id='test-image-processor', push_to_hub=lowercase_, use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(lowercase_, getattr(lowercase_, lowercase_ ) )
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case = ViTImageProcessor.from_pretrained(lowercase_ )
image_processor.push_to_hub('valid_org/test-image-processor', use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained('valid_org/test-image-processor' )
for k, v in image_processor.__dict__.items():
self.assertEqual(lowercase_, getattr(lowercase_, lowercase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-image-processor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
lowercase_, repo_id='valid_org/test-image-processor-org', push_to_hub=lowercase_, use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org' )
for k, v in image_processor.__dict__.items():
self.assertEqual(lowercase_, getattr(lowercase_, lowercase_ ) )
def _lowerCamelCase ( self ) -> Any:
CustomImageProcessor.register_for_auto_class()
snake_case = CustomImageProcessor.from_pretrained(lowercase_ )
image_processor.push_to_hub('test-dynamic-image-processor', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map, {'AutoImageProcessor': 'custom_image_processing.CustomImageProcessor'}, )
snake_case = AutoImageProcessor.from_pretrained(
F'''{USER}/test-dynamic-image-processor''', trust_remote_code=lowercase_ )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__, 'CustomImageProcessor' )
| 366 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 332 | 0 |
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
lowerCAmelCase_ = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
lowerCAmelCase_ = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
lowerCAmelCase_ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
lowerCAmelCase_ = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
lowerCAmelCase_ = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
lowerCAmelCase_ = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
lowerCAmelCase_ = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
lowerCAmelCase_ = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
lowerCAmelCase_ = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
lowerCAmelCase_ = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
lowerCAmelCase_ = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
lowerCAmelCase_ = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
lowerCAmelCase_ = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
lowerCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
lowerCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
lowerCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
lowerCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
lowerCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
lowerCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
lowerCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
lowerCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
lowerCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
lowerCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
lowerCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
lowerCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
lowerCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
lowerCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class lowerCamelCase ( _BaseAutoModelClass ):
snake_case_ = FLAX_MODEL_MAPPING
lowerCAmelCase_ = auto_class_update(FlaxAutoModel)
class lowerCamelCase ( _BaseAutoModelClass ):
snake_case_ = FLAX_MODEL_FOR_PRETRAINING_MAPPING
lowerCAmelCase_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
class lowerCamelCase ( _BaseAutoModelClass ):
snake_case_ = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
lowerCAmelCase_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
class lowerCamelCase ( _BaseAutoModelClass ):
snake_case_ = FLAX_MODEL_FOR_MASKED_LM_MAPPING
lowerCAmelCase_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
class lowerCamelCase ( _BaseAutoModelClass ):
snake_case_ = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCAmelCase_ = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class lowerCamelCase ( _BaseAutoModelClass ):
snake_case_ = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowerCAmelCase_ = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)
class lowerCamelCase ( _BaseAutoModelClass ):
snake_case_ = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
lowerCAmelCase_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
class lowerCamelCase ( _BaseAutoModelClass ):
snake_case_ = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
lowerCAmelCase_ = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="token classification"
)
class lowerCamelCase ( _BaseAutoModelClass ):
snake_case_ = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
lowerCAmelCase_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
class lowerCamelCase ( _BaseAutoModelClass ):
snake_case_ = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
lowerCAmelCase_ = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class lowerCamelCase ( _BaseAutoModelClass ):
snake_case_ = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowerCAmelCase_ = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="image classification"
)
class lowerCamelCase ( _BaseAutoModelClass ):
snake_case_ = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
lowerCAmelCase_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")
class lowerCamelCase ( _BaseAutoModelClass ):
snake_case_ = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
lowerCAmelCase_ = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
)
| 367 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowerCAmelCase_ = False
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowerCamelCase ( self ) -> List[Any]:
return 12
@property
def _lowerCamelCase ( self ) -> Dict:
return 12
@property
def _lowerCamelCase ( self ) -> List[Any]:
return 32
@property
def _lowerCamelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
snake_case = VQModel(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3, )
return model
@property
def _lowerCamelCase ( self ) -> List[Any]:
snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _lowerCamelCase ( self ) -> Tuple:
torch.manual_seed(0 )
snake_case = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
return CLIPTextModel(lowercase_ )
@property
def _lowerCamelCase ( self ) -> str:
torch.manual_seed(0 )
snake_case = 12
snake_case = 12
snake_case = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
snake_case = TransformeraDModel(**lowercase_ )
return model
def _lowerCamelCase ( self ) -> Tuple:
snake_case = 'cpu'
snake_case = self.dummy_vqvae
snake_case = self.dummy_text_encoder
snake_case = self.dummy_tokenizer
snake_case = self.dummy_transformer
snake_case = VQDiffusionScheduler(self.num_embed )
snake_case = LearnedClassifierFreeSamplingEmbeddings(learnable=lowercase_ )
snake_case = VQDiffusionPipeline(
vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, )
snake_case = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case = 'teddy bear playing in the pool'
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' )
snake_case = output.images
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe(
[prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case = 'cpu'
snake_case = self.dummy_vqvae
snake_case = self.dummy_text_encoder
snake_case = self.dummy_tokenizer
snake_case = self.dummy_transformer
snake_case = VQDiffusionScheduler(self.num_embed )
snake_case = LearnedClassifierFreeSamplingEmbeddings(
learnable=lowercase_, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length )
snake_case = VQDiffusionPipeline(
vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, )
snake_case = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case = 'teddy bear playing in the pool'
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' )
snake_case = output.images
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe(
[prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self ) -> str:
snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
snake_case = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
snake_case = pipeline.to(lowercase_ )
pipeline.set_progress_bar_config(disable=lowercase_ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipeline(
'teddy bear playing in the pool', num_images_per_prompt=1, generator=lowercase_, output_type='np', )
snake_case = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 332 | 0 |
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
lowerCAmelCase_ = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
lowerCAmelCase_ = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def lowerCamelCase ( ) -> List[str]:
snake_case = calculate_rouge(snake_case_ , snake_case_ , bootstrap_aggregation=snake_case_ , rouge_keys=['rouge2', 'rougeL'] )
assert isinstance(snake_case_ , snake_case_ )
snake_case = calculate_rouge(snake_case_ , snake_case_ , bootstrap_aggregation=snake_case_ , rouge_keys=['rouge2'] )
assert (
pd.DataFrame(no_aggregation['rouge2'] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra['rouge2'] ).fmeasure.mean()
)
def lowerCamelCase ( ) -> str:
snake_case = 'rougeLsum'
snake_case = calculate_rouge(snake_case_ , snake_case_ , newline_sep=snake_case_ , rouge_keys=[k] )[k]
snake_case = calculate_rouge(snake_case_ , snake_case_ , newline_sep=snake_case_ , rouge_keys=[k] )[k]
assert score > score_no_sep
def lowerCamelCase ( ) -> Optional[int]:
snake_case = ['rouge1', 'rouge2', 'rougeL']
snake_case = calculate_rouge(snake_case_ , snake_case_ , newline_sep=snake_case_ , rouge_keys=snake_case_ )
snake_case = calculate_rouge(snake_case_ , snake_case_ , newline_sep=snake_case_ , rouge_keys=snake_case_ )
assert score_sep == score_no_sep
def lowerCamelCase ( ) -> str:
snake_case = [
'Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.',
'Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .',
]
snake_case = [
'Margot Frank, died in 1945, a month earlier than previously thought.',
'Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of'
' the final seconds on board Flight 9525.',
]
assert calculate_rouge(snake_case_ , snake_case_ , newline_sep=snake_case_ ) == calculate_rouge(snake_case_ , snake_case_ , newline_sep=snake_case_ )
def lowerCamelCase ( ) -> Optional[Any]:
snake_case = [
'\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" '
]
snake_case = [
' Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'
]
snake_case = calculate_rouge(snake_case_ , snake_case_ , rouge_keys=['rougeLsum'] , newline_sep=snake_case_ )['rougeLsum']
snake_case = calculate_rouge(snake_case_ , snake_case_ , rouge_keys=['rougeLsum'] )['rougeLsum']
assert new_score > prev_score
def lowerCamelCase ( ) -> Any:
snake_case = Path('examples/seq2seq/test_data/wmt_en_ro' )
snake_case = calculate_rouge_path(data_dir.joinpath('test.source' ) , data_dir.joinpath('test.target' ) )
assert isinstance(snake_case_ , snake_case_ )
snake_case = calculate_rouge_path(
data_dir.joinpath('test.source' ) , data_dir.joinpath('test.target' ) , bootstrap_aggregation=snake_case_ )
assert isinstance(snake_case_ , snake_case_ )
| 368 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase ( metaclass=__lowerCAmelCase ):
snake_case_ = ['''note_seq''']
def __init__( self, *lowercase_, **lowercase_ ) -> str:
requires_backends(self, ['note_seq'] )
@classmethod
def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
requires_backends(cls, ['note_seq'] )
@classmethod
def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
requires_backends(cls, ['note_seq'] )
| 332 | 0 |
'''simple docstring'''
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
lowerCAmelCase_ = TypeVar("T")
class lowerCamelCase ( Generic[T] ):
def __init__( self, lowercase_ = True ) -> Optional[Any]:
snake_case = {} # dictionary of lists
snake_case = directed
def _lowerCamelCase ( self, lowercase_, lowercase_ ) -> Union[str, Any]:
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(_UpperCAmelCase )
self.adj_list[destination_vertex].append(_UpperCAmelCase )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(_UpperCAmelCase )
snake_case = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(_UpperCAmelCase )
snake_case = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
snake_case = [destination_vertex]
snake_case = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(_UpperCAmelCase )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(_UpperCAmelCase )
snake_case = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
snake_case = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
snake_case = [destination_vertex]
snake_case = []
return self
def __repr__( self ) -> List[str]:
return pformat(self.adj_list )
| 369 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase ( __lowerCAmelCase ):
def __init__( self, *lowercase_, **lowercase_ ) -> None:
warnings.warn(
'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DPTImageProcessor instead.', lowercase_, )
super().__init__(*lowercase_, **lowercase_ )
| 332 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 370 |
'''simple docstring'''
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
lowerCAmelCase_ = pd.read_csv(
"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
"position_salaries.csv"
)
lowerCAmelCase_ = dataset.iloc[:, 1:2].values
lowerCAmelCase_ = dataset.iloc[:, 2].values
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = train_test_split(X, y, test_size=0.2, random_state=0)
lowerCAmelCase_ = PolynomialFeatures(degree=4)
lowerCAmelCase_ = poly_reg.fit_transform(X)
lowerCAmelCase_ = LinearRegression()
pol_reg.fit(X_poly, y)
def __magic_name__ ( ) -> Any:
plt.scatter(A , A , color='red' )
plt.plot(A , pol_reg.predict(poly_reg.fit_transform(A ) ) , color='blue' )
plt.title('Truth or Bluff (Linear Regression)' )
plt.xlabel('Position level' )
plt.ylabel('Salary' )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 332 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.